2024-11-25 19:24:10,298 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-25 19:24:10,316 main DEBUG Took 0.014580 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-25 19:24:10,316 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-25 19:24:10,317 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-25 19:24:10,319 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-25 19:24:10,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,331 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-25 19:24:10,346 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,348 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,348 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,349 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,350 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,350 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,352 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,352 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,353 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,353 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,355 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,355 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,356 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,356 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-25 19:24:10,357 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,358 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,358 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,360 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,360 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,361 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,361 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,362 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,362 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 19:24:10,363 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,363 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-25 19:24:10,365 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 19:24:10,367 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-25 19:24:10,370 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-25 19:24:10,370 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-25 19:24:10,372 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-25 19:24:10,373 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-25 19:24:10,385 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-25 19:24:10,389 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-25 19:24:10,391 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-25 19:24:10,392 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-25 19:24:10,393 main DEBUG createAppenders(={Console}) 2024-11-25 19:24:10,394 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-25 19:24:10,394 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-25 19:24:10,394 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-25 19:24:10,395 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-25 19:24:10,396 main DEBUG OutputStream closed 2024-11-25 19:24:10,396 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-25 19:24:10,396 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-25 19:24:10,397 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-25 19:24:10,500 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-25 19:24:10,503 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-25 19:24:10,505 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-25 19:24:10,507 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-25 19:24:10,508 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-25 19:24:10,508 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-25 19:24:10,509 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-25 19:24:10,509 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-25 19:24:10,510 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-25 19:24:10,510 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-25 19:24:10,511 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-25 19:24:10,512 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-25 19:24:10,512 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-25 19:24:10,513 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-25 19:24:10,513 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-25 19:24:10,514 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-25 19:24:10,514 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-25 19:24:10,515 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-25 19:24:10,519 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-25 19:24:10,519 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-25 19:24:10,520 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-25 19:24:10,521 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-25T19:24:10,862 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2 2024-11-25 19:24:10,866 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-25 19:24:10,866 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-25T19:24:10,878 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-25T19:24:10,925 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=254, ProcessCount=11, AvailableMemoryMB=7215 2024-11-25T19:24:10,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T19:24:10,945 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff, deleteOnExit=true 2024-11-25T19:24:10,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T19:24:10,946 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/test.cache.data in system properties and HBase conf 2024-11-25T19:24:10,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T19:24:10,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir in system properties and HBase conf 2024-11-25T19:24:10,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T19:24:10,949 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T19:24:10,949 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T19:24:11,031 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-25T19:24:11,138 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T19:24:11,144 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:24:11,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:24:11,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T19:24:11,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:24:11,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T19:24:11,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T19:24:11,149 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:24:11,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:24:11,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T19:24:11,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/nfs.dump.dir in system properties and HBase conf 2024-11-25T19:24:11,152 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/java.io.tmpdir in system properties and HBase conf 2024-11-25T19:24:11,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:24:11,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T19:24:11,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T19:24:11,897 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:24:12,267 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-25T19:24:12,386 INFO [Time-limited test {}] log.Log(170): Logging initialized @2973ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-25T19:24:12,503 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:24:12,599 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:24:12,631 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:24:12,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:24:12,633 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:24:12,648 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:24:12,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:24:12,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:24:12,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/java.io.tmpdir/jetty-localhost-33881-hadoop-hdfs-3_4_1-tests_jar-_-any-12347107493214772220/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:24:12,903 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33881} 2024-11-25T19:24:12,904 INFO [Time-limited test {}] server.Server(415): Started @3492ms 2024-11-25T19:24:12,941 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:24:13,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:24:13,308 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:24:13,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:24:13,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:24:13,310 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:24:13,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:24:13,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:24:13,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/java.io.tmpdir/jetty-localhost-39801-hadoop-hdfs-3_4_1-tests_jar-_-any-232139926694430366/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:24:13,442 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:39801} 2024-11-25T19:24:13,442 INFO [Time-limited test {}] server.Server(415): Started @4030ms 2024-11-25T19:24:13,504 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:24:13,660 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:24:13,668 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:24:13,669 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:24:13,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:24:13,670 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:24:13,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:24:13,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:24:13,809 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/java.io.tmpdir/jetty-localhost-42369-hadoop-hdfs-3_4_1-tests_jar-_-any-15170015219954834321/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:24:13,810 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:42369} 2024-11-25T19:24:13,810 INFO [Time-limited test {}] server.Server(415): Started @4399ms 2024-11-25T19:24:13,813 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T19:24:14,000 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data2/current/BP-1956332910-172.17.0.2-1732562652023/current, will proceed with Du for space computation calculation, 2024-11-25T19:24:14,000 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data4/current/BP-1956332910-172.17.0.2-1732562652023/current, will proceed with Du for space computation calculation, 2024-11-25T19:24:14,000 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data1/current/BP-1956332910-172.17.0.2-1732562652023/current, will proceed with Du for space computation calculation, 2024-11-25T19:24:14,000 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data3/current/BP-1956332910-172.17.0.2-1732562652023/current, will proceed with Du for space computation calculation, 2024-11-25T19:24:14,078 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:24:14,079 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:24:14,166 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4111514f07f53874 with lease ID 0x7d6478a876fba105: Processing first storage report for DS-5dac31d6-1394-469f-b646-2077024ab044 from datanode DatanodeRegistration(127.0.0.1:41765, datanodeUuid=21fec8a6-fc95-48fd-8081-770a0bdcffc0, infoPort=33025, infoSecurePort=0, ipcPort=39583, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023) 2024-11-25T19:24:14,168 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4111514f07f53874 with lease ID 0x7d6478a876fba105: from storage DS-5dac31d6-1394-469f-b646-2077024ab044 node DatanodeRegistration(127.0.0.1:41765, datanodeUuid=21fec8a6-fc95-48fd-8081-770a0bdcffc0, infoPort=33025, infoSecurePort=0, ipcPort=39583, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-25T19:24:14,169 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x541098842dc8b4f3 with lease ID 0x7d6478a876fba106: Processing first storage report for DS-8a6e0222-9103-41cb-a269-2388de6091a9 from datanode DatanodeRegistration(127.0.0.1:44037, datanodeUuid=01c92f29-cfbd-4131-9d6d-e5c27e28e3b7, infoPort=38421, infoSecurePort=0, ipcPort=39435, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023) 2024-11-25T19:24:14,169 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x541098842dc8b4f3 with lease ID 0x7d6478a876fba106: from storage DS-8a6e0222-9103-41cb-a269-2388de6091a9 node DatanodeRegistration(127.0.0.1:44037, datanodeUuid=01c92f29-cfbd-4131-9d6d-e5c27e28e3b7, infoPort=38421, infoSecurePort=0, ipcPort=39435, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:24:14,170 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4111514f07f53874 with lease ID 0x7d6478a876fba105: Processing first storage report for DS-9c792995-3372-4b8f-a398-5f7df1b19442 from datanode DatanodeRegistration(127.0.0.1:41765, datanodeUuid=21fec8a6-fc95-48fd-8081-770a0bdcffc0, infoPort=33025, infoSecurePort=0, ipcPort=39583, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023) 2024-11-25T19:24:14,170 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4111514f07f53874 with lease ID 0x7d6478a876fba105: from storage DS-9c792995-3372-4b8f-a398-5f7df1b19442 node DatanodeRegistration(127.0.0.1:41765, datanodeUuid=21fec8a6-fc95-48fd-8081-770a0bdcffc0, infoPort=33025, infoSecurePort=0, ipcPort=39583, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:24:14,170 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x541098842dc8b4f3 with lease ID 0x7d6478a876fba106: Processing first storage report for DS-af44a0d4-97e6-4a41-a08b-8cf6b848baff from datanode DatanodeRegistration(127.0.0.1:44037, datanodeUuid=01c92f29-cfbd-4131-9d6d-e5c27e28e3b7, infoPort=38421, infoSecurePort=0, ipcPort=39435, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023) 2024-11-25T19:24:14,171 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x541098842dc8b4f3 
with lease ID 0x7d6478a876fba106: from storage DS-af44a0d4-97e6-4a41-a08b-8cf6b848baff node DatanodeRegistration(127.0.0.1:44037, datanodeUuid=01c92f29-cfbd-4131-9d6d-e5c27e28e3b7, infoPort=38421, infoSecurePort=0, ipcPort=39435, storageInfo=lv=-57;cid=testClusterID;nsid=14007767;c=1732562652023), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:24:14,249 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2 2024-11-25T19:24:14,344 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/zookeeper_0, clientPort=60717, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T19:24:14,354 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60717 2024-11-25T19:24:14,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:14,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:14,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:24:14,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:24:14,710 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678 with version=8 2024-11-25T19:24:14,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase-staging 2024-11-25T19:24:14,832 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-25T19:24:15,103 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:24:15,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:24:15,121 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:24:15,127 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:24:15,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:24:15,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:24:15,326 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T19:24:15,390 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-25T19:24:15,401 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-25T19:24:15,404 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:24:15,430 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 62496 (auto-detected) 2024-11-25T19:24:15,432 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-25T19:24:15,453 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34569 2024-11-25T19:24:15,473 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34569 connecting to ZooKeeper ensemble=127.0.0.1:60717 2024-11-25T19:24:15,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:345690x0, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:24:15,508 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34569-0x100785770e30000 connected 2024-11-25T19:24:15,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:15,544 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:15,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:24:15,562 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678, hbase.cluster.distributed=false 2024-11-25T19:24:15,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:24:15,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34569 
2024-11-25T19:24:15,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34569 2024-11-25T19:24:15,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34569 2024-11-25T19:24:15,603 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34569 2024-11-25T19:24:15,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34569 2024-11-25T19:24:15,722 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:24:15,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:24:15,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:24:15,724 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:24:15,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:24:15,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:24:15,728 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:24:15,732 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:24:15,733 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46159 2024-11-25T19:24:15,735 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46159 connecting to ZooKeeper ensemble=127.0.0.1:60717 2024-11-25T19:24:15,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:15,739 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:15,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461590x0, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:24:15,747 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:461590x0, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:24:15,747 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:46159-0x100785770e30001 connected 2024-11-25T19:24:15,753 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:24:15,769 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:24:15,773 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T19:24:15,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:24:15,787 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46159 2024-11-25T19:24:15,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46159 2024-11-25T19:24:15,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46159 2024-11-25T19:24:15,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46159 2024-11-25T19:24:15,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46159 2024-11-25T19:24:15,817 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ef6ccb75414:34569 2024-11-25T19:24:15,821 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:15,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:24:15,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:24:15,831 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:15,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T19:24:15,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:15,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:15,855 DEBUG 
[master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:24:15,856 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ef6ccb75414,34569,1732562654897 from backup master directory 2024-11-25T19:24:15,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:24:15,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:15,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:24:15,864 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:24:15,865 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:15,867 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-25T19:24:15,869 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-25T19:24:15,932 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase.id] with ID: 5b159736-677a-4ffe-ae58-34dcc90762e2 2024-11-25T19:24:15,932 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/.tmp/hbase.id 2024-11-25T19:24:15,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:24:15,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:24:15,956 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/.tmp/hbase.id]:[hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase.id] 2024-11-25T19:24:16,010 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:16,016 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-25T19:24:16,038 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-25T19:24:16,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:16,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:16,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:24:16,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:24:16,083 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:24:16,087 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T19:24:16,096 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:24:16,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:24:16,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:24:16,172 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store 2024-11-25T19:24:16,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:24:16,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:24:16,606 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-25T19:24:16,609 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:24:16,611 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:24:16,612 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:24:16,612 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:24:16,615 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:24:16,615 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:24:16,615 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T19:24:16,617 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562656611Disabling compacts and flushes for region at 1732562656611Disabling writes for close at 1732562656615 (+4 ms)Writing region close event to WAL at 1732562656615Closed at 1732562656615 2024-11-25T19:24:16,620 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/.initializing 2024-11-25T19:24:16,620 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/WALs/6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:16,645 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C34569%2C1732562654897, suffix=, logDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/WALs/6ef6ccb75414,34569,1732562654897, archiveDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/oldWALs, maxLogs=10 2024-11-25T19:24:16,657 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C34569%2C1732562654897.1732562656651 2024-11-25T19:24:16,684 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/WALs/6ef6ccb75414,34569,1732562654897/6ef6ccb75414%2C34569%2C1732562654897.1732562656651 2024-11-25T19:24:16,698 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:24:16,703 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:24:16,704 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:24:16,709 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,711 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T19:24:16,805 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:16,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:16,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T19:24:16,819 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:16,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:24:16,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T19:24:16,825 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:16,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:24:16,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T19:24:16,831 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:16,834 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:24:16,835 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,840 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,842 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,849 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,850 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,853 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T19:24:16,858 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:24:16,867 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:24:16,870 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883757, jitterRate=0.12375541031360626}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T19:24:16,880 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732562656731Initializing all the Stores at 1732562656734 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562656734Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562656735 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562656736 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562656736Cleaning up temporary data from old regions at 1732562656850 (+114 ms)Region opened successfully at 1732562656880 (+30 ms) 2024-11-25T19:24:16,882 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T19:24:16,929 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dcaaf5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:24:16,973 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T19:24:16,989 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T19:24:16,989 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T19:24:16,995 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T19:24:16,998 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-11-25T19:24:17,003 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-25T19:24:17,004 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T19:24:17,039 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T19:24:17,051 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T19:24:17,053 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T19:24:17,056 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T19:24:17,058 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T19:24:17,059 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T19:24:17,062 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T19:24:17,071 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T19:24:17,073 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T19:24:17,075 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T19:24:17,076 
DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T19:24:17,096 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T19:24:17,097 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T19:24:17,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:24:17,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:24:17,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,109 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ef6ccb75414,34569,1732562654897, sessionid=0x100785770e30000, setting cluster-up flag (Was=false) 2024-11-25T19:24:17,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,134 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T19:24:17,137 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:17,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,150 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T19:24:17,152 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:17,160 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T19:24:17,199 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(746): ClusterId : 5b159736-677a-4ffe-ae58-34dcc90762e2 2024-11-25T19:24:17,202 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:24:17,206 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:24:17,207 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:24:17,210 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:24:17,211 DEBUG [RS:0;6ef6ccb75414:46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ad6a2d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:24:17,233 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ef6ccb75414:46159 2024-11-25T19:24:17,238 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:24:17,238 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:24:17,238 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T19:24:17,241 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,34569,1732562654897 with port=46159, startcode=1732562655686 2024-11-25T19:24:17,258 DEBUG [RS:0;6ef6ccb75414:46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:24:17,257 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T19:24:17,271 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T19:24:17,279 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-25T19:24:17,285 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ef6ccb75414,34569,1732562654897 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T19:24:17,295 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:24:17,295 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:24:17,296 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:24:17,296 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:24:17,296 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ef6ccb75414:0, corePoolSize=10, maxPoolSize=10 2024-11-25T19:24:17,296 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,297 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:24:17,297 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,303 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732562687303 2024-11-25T19:24:17,305 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T19:24:17,306 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:24:17,306 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T19:24:17,306 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T19:24:17,311 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T19:24:17,312 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T19:24:17,312 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T19:24:17,313 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T19:24:17,313 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:17,313 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T19:24:17,313 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T19:24:17,319 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T19:24:17,320 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T19:24:17,321 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T19:24:17,325 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T19:24:17,326 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T19:24:17,330 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562657327,5,FailOnTimeoutGroup] 2024-11-25T19:24:17,331 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562657330,5,FailOnTimeoutGroup] 2024-11-25T19:24:17,331 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,332 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T19:24:17,334 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,338 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-25T19:24:17,339 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41931, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:24:17,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:24:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:24:17,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34569 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:17,348 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T19:24:17,348 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678 2024-11-25T19:24:17,351 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34569 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:17,369 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678 2024-11-25T19:24:17,369 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35499 2024-11-25T19:24:17,369 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:24:17,375 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:24:17,376 DEBUG [RS:0;6ef6ccb75414:46159 {}] zookeeper.ZKUtil(111): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:17,376 WARN [RS:0;6ef6ccb75414:46159 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:24:17,376 INFO [RS:0;6ef6ccb75414:46159 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:24:17,377 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:17,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:24:17,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:24:17,387 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,46159,1732562655686] 2024-11-25T19:24:17,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:24:17,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:24:17,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:24:17,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:17,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:17,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:24:17,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:24:17,408 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:17,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:17,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:24:17,416 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:24:17,417 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:17,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:17,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:24:17,421 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:24:17,423 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:24:17,423 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:17,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:17,426 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:24:17,427 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740 2024-11-25T19:24:17,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740 2024-11-25T19:24:17,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:24:17,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:24:17,434 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T19:24:17,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:24:17,443 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:24:17,449 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:24:17,453 INFO [RS:0;6ef6ccb75414:46159 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:24:17,453 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T19:24:17,453 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696301, jitterRate=-0.1146087497472763}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:24:17,454 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:24:17,457 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732562657389Initializing all the Stores at 1732562657392 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562657392Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562657399 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562657399Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562657399Cleaning up temporary data from old regions at 1732562657433 (+34 ms)Region opened successfully at 1732562657457 (+24 ms) 2024-11-25T19:24:17,457 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:24:17,458 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:24:17,458 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:24:17,458 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:24:17,458 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:24:17,459 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:24:17,459 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562657457Disabling compacts and flushes for region at 1732562657457Disabling writes for close at 1732562657458 (+1 ms)Writing region close event to WAL at 1732562657459 (+1 ms)Closed at 1732562657459 2024-11-25T19:24:17,461 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 
1mins, 0sec 2024-11-25T19:24:17,462 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:24:17,462 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T19:24:17,462 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,463 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,463 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,463 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,463 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,463 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,464 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:24:17,464 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,464 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,464 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,464 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,465 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,465 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:24:17,465 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:24:17,465 DEBUG [RS:0;6ef6ccb75414:46159 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:24:17,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 
2024-11-25T19:24:17,469 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,469 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,469 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,471 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,472 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,472 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,46159,1732562655686-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:24:17,486 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:24:17,488 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T19:24:17,495 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:24:17,497 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,46159,1732562655686-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,498 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:17,498 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.Replication(171): 6ef6ccb75414,46159,1732562655686 started 2024-11-25T19:24:17,515 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:24:17,516 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,46159,1732562655686, RpcServer on 6ef6ccb75414/172.17.0.2:46159, sessionid=0x100785770e30001 2024-11-25T19:24:17,516 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:24:17,517 DEBUG [RS:0;6ef6ccb75414:46159 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:17,517 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,46159,1732562655686' 2024-11-25T19:24:17,517 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:24:17,518 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:24:17,519 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:24:17,519 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:24:17,519 DEBUG [RS:0;6ef6ccb75414:46159 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:17,519 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,46159,1732562655686' 2024-11-25T19:24:17,519 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:24:17,520 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:24:17,521 DEBUG [RS:0;6ef6ccb75414:46159 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:24:17,521 INFO [RS:0;6ef6ccb75414:46159 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:24:17,521 INFO [RS:0;6ef6ccb75414:46159 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T19:24:17,632 INFO [RS:0;6ef6ccb75414:46159 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C46159%2C1732562655686, suffix=, logDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686, archiveDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs, maxLogs=32 2024-11-25T19:24:17,636 INFO [RS:0;6ef6ccb75414:46159 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562657635 2024-11-25T19:24:17,639 WARN [6ef6ccb75414:34569 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-25T19:24:17,659 INFO [RS:0;6ef6ccb75414:46159 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562657635 2024-11-25T19:24:17,673 DEBUG [RS:0;6ef6ccb75414:46159 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38421:38421),(127.0.0.1/127.0.0.1:33025:33025)] 2024-11-25T19:24:17,892 DEBUG [6ef6ccb75414:34569 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T19:24:17,902 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:17,909 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,46159,1732562655686, state=OPENING 2024-11-25T19:24:17,913 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T19:24:17,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:24:17,915 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:24:17,915 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:24:17,917 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:24:17,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,46159,1732562655686}] 2024-11-25T19:24:18,098 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T19:24:18,101 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52001, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T19:24:18,114 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T19:24:18,114 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:24:18,119 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C46159%2C1732562655686.meta, suffix=.meta, 
logDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686, archiveDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs, maxLogs=32 2024-11-25T19:24:18,123 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.meta.1732562658123.meta 2024-11-25T19:24:18,131 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.meta.1732562658123.meta 2024-11-25T19:24:18,132 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:24:18,133 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:24:18,135 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T19:24:18,138 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T19:24:18,144 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T19:24:18,149 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T19:24:18,150 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:24:18,150 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T19:24:18,150 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T19:24:18,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:24:18,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:24:18,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:18,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:18,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:24:18,159 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:24:18,159 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:18,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:18,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:24:18,163 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:24:18,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:18,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:24:18,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:24:18,167 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:24:18,167 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:18,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T19:24:18,168 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:24:18,169 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740 2024-11-25T19:24:18,172 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740 2024-11-25T19:24:18,175 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:24:18,175 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:24:18,177 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T19:24:18,180 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:24:18,183 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854909, jitterRate=0.08707320690155029}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:24:18,183 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T19:24:18,185 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732562658150Writing region info on filesystem at 1732562658151 (+1 ms)Initializing all the Stores at 1732562658152 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562658153 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562658153Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562658153Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562658153Cleaning up temporary data from old regions at 1732562658175 (+22 ms)Running coprocessor post-open hooks at 1732562658183 (+8 ms)Region opened successfully at 1732562658185 (+2 ms) 2024-11-25T19:24:18,195 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732562658086 2024-11-25T19:24:18,211 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T19:24:18,212 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T19:24:18,213 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:18,217 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,46159,1732562655686, state=OPEN 2024-11-25T19:24:18,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:24:18,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:24:18,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:24:18,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:24:18,223 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:18,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T19:24:18,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,46159,1732562655686 in 306 msec 2024-11-25T19:24:18,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T19:24:18,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 768 msec 2024-11-25T19:24:18,251 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:24:18,251 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T19:24:18,279 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:24:18,281 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,46159,1732562655686, seqNum=-1] 2024-11-25T19:24:18,310 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:24:18,313 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47473, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:24:18,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1390 sec 2024-11-25T19:24:18,341 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732562658341, completionTime=-1 2024-11-25T19:24:18,344 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T19:24:18,345 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T19:24:18,381 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T19:24:18,381 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732562718381 2024-11-25T19:24:18,381 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732562778381 2024-11-25T19:24:18,382 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 37 msec 2024-11-25T19:24:18,385 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34569,1732562654897-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:18,386 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34569,1732562654897-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:18,386 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34569,1732562654897-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:18,388 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ef6ccb75414:34569, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:24:18,388 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:18,394 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T19:24:18,399 DEBUG [master/6ef6ccb75414:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T19:24:18,425 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.560sec 2024-11-25T19:24:18,426 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T19:24:18,428 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T19:24:18,429 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T19:24:18,431 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T19:24:18,431 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T19:24:18,432 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34569,1732562654897-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:24:18,433 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34569,1732562654897-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T19:24:18,444 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T19:24:18,445 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T19:24:18,446 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34569,1732562654897-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:24:18,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4731d90b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:24:18,513 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-25T19:24:18,515 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-25T19:24:18,519 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ef6ccb75414,34569,-1 for getting cluster id 2024-11-25T19:24:18,523 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T19:24:18,539 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5b159736-677a-4ffe-ae58-34dcc90762e2' 2024-11-25T19:24:18,543 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T19:24:18,543 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5b159736-677a-4ffe-ae58-34dcc90762e2" 2024-11-25T19:24:18,544 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@334d249d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:24:18,544 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ef6ccb75414,34569,-1] 2024-11-25T19:24:18,548 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T19:24:18,551 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:24:18,555 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42230, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T19:24:18,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb3700d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:24:18,559 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:24:18,567 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,46159,1732562655686, seqNum=-1] 2024-11-25T19:24:18,568 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:24:18,575 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56832, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:24:18,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:18,607 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:24:18,620 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T19:24:18,626 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T19:24:18,633 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 6ef6ccb75414,34569,1732562654897 2024-11-25T19:24:18,636 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@53459839 2024-11-25T19:24:18,637 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T19:24:18,641 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42238, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T19:24:18,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34569 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T19:24:18,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34569 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
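The two TableDescriptorChecker warnings above show the test running with hbase.hregion.max.filesize=786432 and hbase.hregion.memstore.flush.size=8192, far below production defaults, which is what forces the frequent flushes and WAL rolls seen later in this log. The warning can be triggered either from the table descriptor or from the cluster Configuration; the following is only a minimal sketch, assuming the Configuration route, with the property keys and numbers taken from the warnings and everything else assumed rather than quoted from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical setup that would produce the MAX_FILESIZE / MEMSTORE_FLUSHSIZE warnings above.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB regions -> "too small" warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB flush size -> "too small" warning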
2024-11-25T19:24:18,650 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34569 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:24:18,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34569 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-25T19:24:18,672 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T19:24:18,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34569 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-25T19:24:18,676 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:18,680 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T19:24:18,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34569 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:24:18,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741835_1011 (size=389) 2024-11-25T19:24:18,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741835_1011 (size=389) 2024-11-25T19:24:18,758 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3fdd8219a05a24e59832e2faa0413820, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678 2024-11-25T19:24:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741836_1012 (size=72) 2024-11-25T19:24:18,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741836_1012 (size=72) 2024-11-25T19:24:18,785 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:24:18,785 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 3fdd8219a05a24e59832e2faa0413820, disabling compactions & flushes 2024-11-25T19:24:18,785 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:24:18,785 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:24:18,785 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. after waiting 0 ms 2024-11-25T19:24:18,786 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:24:18,786 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:24:18,786 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3fdd8219a05a24e59832e2faa0413820: Waiting for close lock at 1732562658785Disabling compacts and flushes for region at 1732562658785Disabling writes for close at 1732562658786 (+1 ms)Writing region close event to WAL at 1732562658786Closed at 1732562658786 2024-11-25T19:24:18,788 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T19:24:18,794 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732562658789"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732562658789"}]},"ts":"1732562658789"} 2024-11-25T19:24:18,805 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-25T19:24:18,808 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T19:24:18,812 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562658809"}]},"ts":"1732562658809"} 2024-11-25T19:24:18,818 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-25T19:24:18,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3fdd8219a05a24e59832e2faa0413820, ASSIGN}] 2024-11-25T19:24:18,824 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3fdd8219a05a24e59832e2faa0413820, ASSIGN 2024-11-25T19:24:18,828 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3fdd8219a05a24e59832e2faa0413820, ASSIGN; state=OFFLINE, location=6ef6ccb75414,46159,1732562655686; forceNewPlan=false, retain=false 2024-11-25T19:24:18,979 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3fdd8219a05a24e59832e2faa0413820, regionState=OPENING, regionLocation=6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:18,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3fdd8219a05a24e59832e2faa0413820, ASSIGN because future has completed 2024-11-25T19:24:18,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3fdd8219a05a24e59832e2faa0413820, server=6ef6ccb75414,46159,1732562655686}] 2024-11-25T19:24:19,156 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 
2024-11-25T19:24:19,157 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3fdd8219a05a24e59832e2faa0413820, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:24:19,158 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,158 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:24:19,158 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,158 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,164 INFO [StoreOpener-3fdd8219a05a24e59832e2faa0413820-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,168 INFO [StoreOpener-3fdd8219a05a24e59832e2faa0413820-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fdd8219a05a24e59832e2faa0413820 columnFamilyName info 2024-11-25T19:24:19,168 DEBUG [StoreOpener-3fdd8219a05a24e59832e2faa0413820-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:24:19,171 INFO [StoreOpener-3fdd8219a05a24e59832e2faa0413820-1 {}] regionserver.HStore(327): Store=3fdd8219a05a24e59832e2faa0413820/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:24:19,171 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,174 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,175 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,176 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,176 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,182 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,186 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:24:19,187 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3fdd8219a05a24e59832e2faa0413820; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695454, jitterRate=-0.11568552255630493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T19:24:19,187 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:19,189 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3fdd8219a05a24e59832e2faa0413820: Running coprocessor pre-open hook at 1732562659159Writing region info on filesystem at 1732562659159Initializing all the Stores at 1732562659161 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562659161Cleaning up temporary data from old regions at 1732562659176 (+15 ms)Running coprocessor post-open hooks at 1732562659187 (+11 ms)Region opened successfully at 1732562659188 (+1 ms) 2024-11-25T19:24:19,191 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820., pid=6, masterSystemTime=1732562659147 2024-11-25T19:24:19,197 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:24:19,198 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:24:19,198 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3fdd8219a05a24e59832e2faa0413820, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,46159,1732562655686 2024-11-25T19:24:19,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3fdd8219a05a24e59832e2faa0413820, server=6ef6ccb75414,46159,1732562655686 because future has completed 2024-11-25T19:24:19,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T19:24:19,218 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3fdd8219a05a24e59832e2faa0413820, server=6ef6ccb75414,46159,1732562655686 in 218 msec 2024-11-25T19:24:19,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T19:24:19,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3fdd8219a05a24e59832e2faa0413820, ASSIGN in 396 msec 2024-11-25T19:24:19,225 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T19:24:19,225 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562659225"}]},"ts":"1732562659225"} 2024-11-25T19:24:19,232 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-25T19:24:19,234 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T19:24:19,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 580 msec 2024-11-25T19:24:23,658 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T19:24:23,722 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T19:24:23,724 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-25T19:24:25,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:24:25,386 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T19:24:25,388 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-25T19:24:25,389 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-25T19:24:25,390 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:24:25,390 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T19:24:25,390 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T19:24:25,390 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-25T19:24:28,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34569 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:24:28,707 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-25T19:24:28,710 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-25T19:24:28,718 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-25T19:24:28,719 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 
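The CREATE operation that completes above was requested at 19:24:18,650 with a single column family ('info', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)'). A sketch of an equivalent request through the standard HBase 3.x client API follows; the descriptor values are copied from the logged request, while the surrounding client code (including the class name) is illustrative and not taken from this test output:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          // Drives a CreateTableProcedure like pid=4 above, followed by the ASSIGN/OpenRegionProcedure children.
          admin.createTable(td);
        }
      }
    }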
2024-11-25T19:24:28,720 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562668719 2024-11-25T19:24:28,731 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:28,731 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:28,731 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:28,732 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:28,732 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:28,732 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562657635 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562668719 2024-11-25T19:24:28,734 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:24:28,734 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562657635 is not closed yet, will try archiving it next time 2024-11-25T19:24:28,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741833_1009 (size=451) 2024-11-25T19:24:28,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741833_1009 (size=451) 2024-11-25T19:24:28,742 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562657635 to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs/6ef6ccb75414%2C46159%2C1732562655686.1732562657635 2024-11-25T19:24:28,744 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820., hostname=6ef6ccb75414,46159,1732562655686, seqNum=2] 2024-11-25T19:24:40,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] regionserver.HRegion(8855): Flush requested on 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:40,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3fdd8219a05a24e59832e2faa0413820 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:24:40,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c0a5aeca7e9a47b4950ba821163c925b is 1080, key is row0001/info:/1732562668747/Put/seqid=0 2024-11-25T19:24:40,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741838_1014 (size=12509) 2024-11-25T19:24:40,858 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741838_1014 (size=12509) 2024-11-25T19:24:40,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c0a5aeca7e9a47b4950ba821163c925b 2024-11-25T19:24:40,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c0a5aeca7e9a47b4950ba821163c925b as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b 2024-11-25T19:24:40,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b, entries=7, sequenceid=11, filesize=12.2 K 2024-11-25T19:24:40,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3fdd8219a05a24e59832e2faa0413820 in 139ms, sequenceid=11, compaction requested=false 2024-11-25T19:24:40,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3fdd8219a05a24e59832e2faa0413820: 2024-11-25T19:24:44,246 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
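The flush above persists ~7.36 KB (seven cells, biggest cell ~1080 bytes, keys starting at row0001) into c0a5aeca7e9a47b4950ba821163c925b once the memstore crosses the 8 KB flush size configured for this test. The following is a hedged sketch of the kind of client writes that would fill the memstore this way; the row-key pattern, empty qualifier, and value size are inferred from the HFileWriterImpl line above, and the class name and loop are assumptions, not part of the actual test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FillMemstore {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          byte[] value = new byte[1024];  // roughly the 1080-byte cells reported by HFileWriterImpl above
          for (int i = 1; i <= 7; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));   // row0001 .. row0007
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value);
            table.put(put);  // ~7 KB of cell data, enough to cross the 8 KB memstore flush size
          }
        }
      }
    }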
2024-11-25T19:24:48,802 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562688802 2024-11-25T19:24:49,020 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:24:49,020 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:49,020 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:49,021 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:49,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:49,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:24:49,022 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562668719 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562688802 2024-11-25T19:24:49,023 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38421:38421),(127.0.0.1/127.0.0.1:33025:33025)] 2024-11-25T19:24:49,023 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562668719 is not closed yet, will try archiving it next time 2024-11-25T19:24:49,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741837_1013 (size=12399) 2024-11-25T19:24:49,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741837_1013 (size=12399) 2024-11-25T19:24:49,230 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:24:51,435 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:24:53,641 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:24:55,846 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:24:55,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] regionserver.HRegion(8855): Flush requested on 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:24:55,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3fdd8219a05a24e59832e2faa0413820 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:24:56,048 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:24:56,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c89918398c3b4bdc8520252423304944 is 1080, key is row0008/info:/1732562682785/Put/seqid=0 2024-11-25T19:24:56,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741840_1016 (size=12509) 2024-11-25T19:24:56,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741840_1016 (size=12509) 2024-11-25T19:24:56,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c89918398c3b4bdc8520252423304944 2024-11-25T19:24:56,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c89918398c3b4bdc8520252423304944 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c89918398c3b4bdc8520252423304944 2024-11-25T19:24:56,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c89918398c3b4bdc8520252423304944, entries=7, sequenceid=21, filesize=12.2 K 2024-11-25T19:24:56,296 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:24:56,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3fdd8219a05a24e59832e2faa0413820 in 
450ms, sequenceid=21, compaction requested=false 2024-11-25T19:24:56,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3fdd8219a05a24e59832e2faa0413820: 2024-11-25T19:24:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-25T19:24:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:24:56,298 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b because midkey is the same as first or last row 2024-11-25T19:24:58,050 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:24:58,451 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T19:24:58,451 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T19:25:00,256 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:25:00,262 WARN [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:25:00,264 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C46159%2C1732562655686:(num 1732562688802) roll requested 2024-11-25T19:25:00,264 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562700264 2024-11-25T19:25:00,478 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK], DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK]] 2024-11-25T19:25:00,478 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:00,479 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:00,479 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:00,479 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:00,479 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
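[Editor's note] The WARN above records the count-based roll trigger: eight syncs in the current check window were slow, over a threshold of five, so a log roll is requested; later entries in this log show the companion time-based trigger firing when a single sync exceeds 5000 ms. Below is a minimal, hypothetical sketch of that two-threshold decision, reconstructed from these messages only and not taken from the HBase sources; the cut-off below which a sync is not counted as slow is left as a parameter because its value does not appear in the log.

    /** Hypothetical reconstruction of the roll triggers recorded above; not HBase source. */
    final class SlowSyncRollPolicy {
      private final long slowSyncMs;                  // a sync slower than this counts as "slow" (value assumed)
      private final long rollOnSingleSyncMs = 5000;   // "time=5011 ms, threshold=5000 ms" in the log
      private final int  rollOnSlowSyncCount = 5;     // "count=8, threshold=5" in the log
      private int slowSyncsInWindow;

      SlowSyncRollPolicy(long slowSyncMs) { this.slowSyncMs = slowSyncMs; }

      /** Returns true if a WAL roll should be requested after a sync that took {@code syncMs} milliseconds. */
      boolean onSyncFinished(long syncMs) {
        if (syncMs > rollOnSingleSyncMs) {
          return true;                                // one very slow sync is enough to request a roll
        }
        if (syncMs > slowSyncMs && ++slowSyncsInWindow > rollOnSlowSyncCount) {
          slowSyncsInWindow = 0;
          return true;                                // too many moderately slow syncs inside the check window
        }
        return false;
      }
    }
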
2024-11-25T19:25:00,480 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562688802 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562700264 2024-11-25T19:25:00,481 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:25:00,482 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562688802 is not closed yet, will try archiving it next time 2024-11-25T19:25:00,482 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562668719 to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs/6ef6ccb75414%2C46159%2C1732562655686.1732562668719 2024-11-25T19:25:00,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741839_1015 (size=7739) 2024-11-25T19:25:00,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741839_1015 (size=7739) 2024-11-25T19:25:02,464 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:04,158 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3fdd8219a05a24e59832e2faa0413820, had cached 0 bytes from a total of 25018 2024-11-25T19:25:04,669 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:06,877 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:09,086 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], 
DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:11,089 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T19:25:11,090 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562711089 2024-11-25T19:25:14,246 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T19:25:16,104 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:16,107 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:16,107 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C46159%2C1732562655686:(num 1732562711089) roll requested 2024-11-25T19:25:16,107 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:16,107 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:16,108 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:16,108 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:16,108 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:16,108 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562700264 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562711089 2024-11-25T19:25:16,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:25:16,110 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562700264 is not closed yet, will try archiving it next time 2024-11-25T19:25:16,110 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562716110 2024-11-25T19:25:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741841_1017 (size=4753) 2024-11-25T19:25:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741841_1017 (size=4753) 2024-11-25T19:25:21,115 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:21,116 WARN [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:21,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] regionserver.HRegion(8855): Flush requested on 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:25:21,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3fdd8219a05a24e59832e2faa0413820 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:25:21,124 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:21,124 WARN [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:23,117 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T19:25:26,122 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:26,122 WARN [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:26,123 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:26,123 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:26,124 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:26,124 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:26,124 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:26,125 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562711089 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562716110 2024-11-25T19:25:26,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741842_1018 (size=1569) 2024-11-25T19:25:26,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741842_1018 (size=1569) 2024-11-25T19:25:26,130 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:25:26,130 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562711089 is not closed yet, will try archiving it next time 2024-11-25T19:25:26,130 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C46159%2C1732562655686:(num 1732562716110) roll requested 2024-11-25T19:25:26,130 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562726130 2024-11-25T19:25:26,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/85f2343e65774b68810444bd23b7af46 is 1080, key is row0015/info:/1732562697848/Put/seqid=0 2024-11-25T19:25:26,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741844_1020 (size=12509) 2024-11-25T19:25:26,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741844_1020 (size=12509) 2024-11-25T19:25:26,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/85f2343e65774b68810444bd23b7af46 2024-11-25T19:25:26,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/85f2343e65774b68810444bd23b7af46 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/85f2343e65774b68810444bd23b7af46 2024-11-25T19:25:26,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/85f2343e65774b68810444bd23b7af46, entries=7, sequenceid=31, filesize=12.2 K 2024-11-25T19:25:31,137 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5004 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:31,137 WARN [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5004 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:31,164 INFO [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:31,164 WARN [FSHLog-0-hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678-prefix:6ef6ccb75414,46159,1732562655686 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41765,DS-5dac31d6-1394-469f-b646-2077024ab044,DISK], DatanodeInfoWithStorage[127.0.0.1:44037,DS-8a6e0222-9103-41cb-a269-2388de6091a9,DISK]] 2024-11-25T19:25:31,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3fdd8219a05a24e59832e2faa0413820 in 10048ms, sequenceid=31, compaction requested=true 2024-11-25T19:25:31,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3fdd8219a05a24e59832e2faa0413820: 2024-11-25T19:25:31,165 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,165 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-25T19:25:31,165 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,165 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:25:31,165 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,165 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b because midkey is the same as first or last row 2024-11-25T19:25:31,165 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,165 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562716110 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562726130 2024-11-25T19:25:31,167 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:25:31,167 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562716110 is not closed yet, will try archiving it next time 2024-11-25T19:25:31,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fdd8219a05a24e59832e2faa0413820:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:25:31,167 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562688802 to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs/6ef6ccb75414%2C46159%2C1732562655686.1732562688802 2024-11-25T19:25:31,167 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C46159%2C1732562655686:(num 1732562731167) roll requested 2024-11-25T19:25:31,167 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562731167 2024-11-25T19:25:31,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741843_1019 (size=438) 2024-11-25T19:25:31,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741843_1019 (size=438) 2024-11-25T19:25:31,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:25:31,169 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:25:31,170 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562700264 to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs/6ef6ccb75414%2C46159%2C1732562655686.1732562700264 2024-11-25T19:25:31,171 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562711089 to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs/6ef6ccb75414%2C46159%2C1732562655686.1732562711089 2024-11-25T19:25:31,173 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:25:31,173 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562716110 to 
hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs/6ef6ccb75414%2C46159%2C1732562655686.1732562716110 2024-11-25T19:25:31,175 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.HStore(1541): 3fdd8219a05a24e59832e2faa0413820/info is initiating minor compaction (all files) 2024-11-25T19:25:31,175 INFO [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3fdd8219a05a24e59832e2faa0413820/info in TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:25:31,176 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,176 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,176 INFO [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b, hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c89918398c3b4bdc8520252423304944, hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/85f2343e65774b68810444bd23b7af46] into tmpdir=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp, totalSize=36.6 K 2024-11-25T19:25:31,176 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,176 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,176 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,176 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562726130 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562731167 2024-11-25T19:25:31,177 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] compactions.Compactor(225): Compacting c0a5aeca7e9a47b4950ba821163c925b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732562668747 2024-11-25T19:25:31,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741845_1021 (size=93) 2024-11-25T19:25:31,178 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] compactions.Compactor(225): Compacting c89918398c3b4bdc8520252423304944, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732562682785 2024-11-25T19:25:31,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741845_1021 (size=93) 2024-11-25T19:25:31,179 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562726130 to 
hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs/6ef6ccb75414%2C46159%2C1732562655686.1732562726130 2024-11-25T19:25:31,179 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] compactions.Compactor(225): Compacting 85f2343e65774b68810444bd23b7af46, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732562697848 2024-11-25T19:25:31,185 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:25:31,186 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C46159%2C1732562655686.1732562731185 2024-11-25T19:25:31,204 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,205 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,205 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,205 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,205 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:31,205 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562731167 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/WALs/6ef6ccb75414,46159,1732562655686/6ef6ccb75414%2C46159%2C1732562655686.1732562731185 2024-11-25T19:25:31,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741846_1022 (size=1258) 2024-11-25T19:25:31,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741846_1022 (size=1258) 2024-11-25T19:25:31,209 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:38421:38421)] 2024-11-25T19:25:31,215 INFO [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3fdd8219a05a24e59832e2faa0413820#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:25:31,216 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/4b747b0e46e846f58f69afed3eeafa9f is 1080, key is row0001/info:/1732562668747/Put/seqid=0 2024-11-25T19:25:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741848_1024 (size=27710) 2024-11-25T19:25:31,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741848_1024 (size=27710) 2024-11-25T19:25:31,233 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/4b747b0e46e846f58f69afed3eeafa9f as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/4b747b0e46e846f58f69afed3eeafa9f 2024-11-25T19:25:31,250 INFO [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3fdd8219a05a24e59832e2faa0413820/info of 3fdd8219a05a24e59832e2faa0413820 into 4b747b0e46e846f58f69afed3eeafa9f(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T19:25:31,250 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3fdd8219a05a24e59832e2faa0413820: 2024-11-25T19:25:31,251 INFO [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820., storeName=3fdd8219a05a24e59832e2faa0413820/info, priority=13, startTime=1732562731166; duration=0sec 2024-11-25T19:25:31,252 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T19:25:31,252 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:25:31,252 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/4b747b0e46e846f58f69afed3eeafa9f because midkey is the same as first or last row 2024-11-25T19:25:31,252 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T19:25:31,252 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:25:31,252 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/4b747b0e46e846f58f69afed3eeafa9f because midkey is the same as first or last row 2024-11-25T19:25:31,253 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T19:25:31,253 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:25:31,253 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/4b747b0e46e846f58f69afed3eeafa9f because midkey is the same as first or last row 2024-11-25T19:25:31,253 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:25:31,253 DEBUG [RS:0;6ef6ccb75414:46159-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fdd8219a05a24e59832e2faa0413820:info 2024-11-25T19:25:43,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] regionserver.HRegion(8855): Flush requested on 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:25:43,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3fdd8219a05a24e59832e2faa0413820 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:25:43,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c0daa822849d41cda0f2cbafce61b727 is 1080, key is row0022/info:/1732562731187/Put/seqid=0 2024-11-25T19:25:43,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741849_1025 (size=12509) 2024-11-25T19:25:43,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741849_1025 (size=12509) 2024-11-25T19:25:43,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c0daa822849d41cda0f2cbafce61b727 2024-11-25T19:25:43,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/c0daa822849d41cda0f2cbafce61b727 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0daa822849d41cda0f2cbafce61b727 2024-11-25T19:25:43,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0daa822849d41cda0f2cbafce61b727, entries=7, sequenceid=42, filesize=12.2 K 2024-11-25T19:25:43,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3fdd8219a05a24e59832e2faa0413820 in 36ms, sequenceid=42, compaction requested=false 2024-11-25T19:25:43,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3fdd8219a05a24e59832e2faa0413820: 2024-11-25T19:25:43,251 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-25T19:25:43,251 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:25:43,251 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/4b747b0e46e846f58f69afed3eeafa9f because midkey is the same as first or last row 2024-11-25T19:25:44,247 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T19:25:49,159 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3fdd8219a05a24e59832e2faa0413820, had cached 0 bytes from a total of 40219 2024-11-25T19:25:51,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T19:25:51,230 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
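[Editor's note] Each flush and compaction in this log ends with the same split check: the store files exceed the 16.0 K sizeToCheck, so a split looks warranted by size, but it is vetoed because the proposed split point (the midkey of the largest store file) equals that file's first or last row, which would leave one daughter region empty. Below is a compact sketch of that decision, reconstructed only from the messages above; the class and method names are illustrative, not the actual split-policy API.

    import java.util.Arrays;
    import java.util.Optional;

    /** Illustrative reconstruction of the split check logged above; names are hypothetical. */
    final class SplitCheckSketch {
      /** Split only if the store is big enough AND a usable split point exists. */
      static boolean shouldSplit(long[] storeFileSizes, long sizeToCheck, Optional<byte[]> midKey,
                                 byte[] firstKey, byte[] lastKey) {
        long sumSize = Arrays.stream(storeFileSizes).sum();
        if (sumSize <= sizeToCheck) {
          return false;                               // "sumSize=39.3 K, sizeToCheck=16.0 K" in the log
        }
        // "cannot split ... because midkey is the same as first or last row": splitting at such a
        // point would put all data on one side, so the region is kept whole despite its size.
        return midKey.isPresent()
            && !Arrays.equals(midKey.get(), firstKey)
            && !Arrays.equals(midKey.get(), lastKey);
      }
    }
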
2024-11-25T19:25:51,230 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:25:51,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:51,237 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:51,237 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
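[Editor's note] The call stack above is the JUnit tearDown working its way down to HBaseTestingUtil.shutdownMiniCluster(), which stops the mini cluster started for the test. Below is a minimal sketch of that setup/teardown shape; only shutdownMiniCluster() appears in the trace, so the field, the no-arg constructor, and the startMiniCluster() call are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class LogRollingTearDownSketch {
      // Assumed field; the trace only shows the shutdownMiniCluster() call itself.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster();   // assumed counterpart to the shutdown recorded in the log
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors AbstractTestLogRolling.tearDown in the stack trace above: stop the mini cluster.
        testUtil.shutdownMiniCluster();
      }
    }
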
2024-11-25T19:25:51,237 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T19:25:51,238 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=476640655, stopped=false 2024-11-25T19:25:51,238 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ef6ccb75414,34569,1732562654897 2024-11-25T19:25:51,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:51,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:51,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:51,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:51,241 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:25:51,241 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T19:25:51,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:51,242 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:25:51,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:51,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:51,242 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,46159,1732562655686' ***** 2024-11-25T19:25:51,242 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:25:51,243 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:25:51,243 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:25:51,243 INFO [RS:0;6ef6ccb75414:46159 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:25:51,244 INFO [RS:0;6ef6ccb75414:46159 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T19:25:51,244 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(3091): Received CLOSE for 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:25:51,245 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,46159,1732562655686 2024-11-25T19:25:51,245 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:25:51,245 INFO [RS:0;6ef6ccb75414:46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ef6ccb75414:46159. 
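[Editor's note] The shutdown signal itself travels through ZooKeeper: the master removes the /hbase/running znode, each server's watcher receives the NodeDeleted events shown above, and each then re-sets its watch on the now-absent znode ("Set watcher on znode that does not yet exist, /hbase/running"). Below is a small sketch of that watch-and-re-arm pattern with the plain ZooKeeper client; the quorum address and znode path are taken from the log, while the session timeout and the standalone main() wrapper are assumptions.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {

      /** exists() both answers whether the znode is there and re-arms the watch, even when it is absent. */
      static void watchRunning(ZooKeeper zk, Watcher watcher) throws Exception {
        zk.exists("/hbase/running", watcher);
      }

      public static void main(String[] args) throws Exception {
        // Quorum address copied from the log; the 30 s session timeout is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60717", 30_000, event -> { });
        Watcher shutdownWatcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (event.getType() == Event.EventType.NodeDeleted) {
              // The master deleted /hbase/running: servers treat this as the cluster-shutdown signal.
              System.out.println("shutdown requested, znode deleted: " + event.getPath());
            }
            try {
              watchRunning(zk, this);   // re-set the watch, as the "Set watcher on znode" entries above show
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
          }
        };
        watchRunning(zk, shutdownWatcher);
        Thread.sleep(60_000);   // keep the demo process alive long enough for the watch to fire
      }
    }
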
2024-11-25T19:25:51,245 DEBUG [RS:0;6ef6ccb75414:46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:25:51,245 DEBUG [RS:0;6ef6ccb75414:46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:51,245 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3fdd8219a05a24e59832e2faa0413820, disabling compactions & flushes 2024-11-25T19:25:51,245 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:25:51,245 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:25:51,245 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. after waiting 0 ms 2024-11-25T19:25:51,245 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:25:51,245 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T19:25:51,245 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T19:25:51,245 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
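[Editor's note] Closing the region first drains what is still in the memstore: the 3.15 KB flushed below is 3,228 bytes, i.e. the last three ~1 KB puts (3,228 / 3 = 1,076 bytes per row, the same per-row size as the earlier 7,532-byte, 7-entry flushes), written out from row0029 onward at sequenceid=48 before the store files can be closed.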
2024-11-25T19:25:51,246 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T19:25:51,246 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 3fdd8219a05a24e59832e2faa0413820 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-25T19:25:51,246 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T19:25:51,246 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:25:51,246 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 3fdd8219a05a24e59832e2faa0413820=TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.} 2024-11-25T19:25:51,246 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:25:51,246 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:25:51,246 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:25:51,246 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:25:51,247 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3fdd8219a05a24e59832e2faa0413820 2024-11-25T19:25:51,247 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-25T19:25:51,252 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/324b781b14c244d4a4ab12c7f9e8fbc3 is 1080, key is row0029/info:/1732562745218/Put/seqid=0 2024-11-25T19:25:51,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741850_1026 (size=8193) 2024-11-25T19:25:51,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741850_1026 (size=8193) 2024-11-25T19:25:51,263 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/324b781b14c244d4a4ab12c7f9e8fbc3 2024-11-25T19:25:51,274 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/.tmp/info/324b781b14c244d4a4ab12c7f9e8fbc3 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/324b781b14c244d4a4ab12c7f9e8fbc3 2024-11-25T19:25:51,276 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/info/f8563fc39f834eec904b3c3ae6a66f86 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820./info:regioninfo/1732562659198/Put/seqid=0 2024-11-25T19:25:51,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741851_1027 (size=7016) 2024-11-25T19:25:51,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741851_1027 (size=7016) 2024-11-25T19:25:51,283 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/info/f8563fc39f834eec904b3c3ae6a66f86 2024-11-25T19:25:51,284 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/324b781b14c244d4a4ab12c7f9e8fbc3, entries=3, sequenceid=48, filesize=8.0 K 2024-11-25T19:25:51,285 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3fdd8219a05a24e59832e2faa0413820 in 40ms, sequenceid=48, compaction requested=true 2024-11-25T19:25:51,286 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b, hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c89918398c3b4bdc8520252423304944, hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/85f2343e65774b68810444bd23b7af46] to archive 2024-11-25T19:25:51,289 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T19:25:51,293 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c0a5aeca7e9a47b4950ba821163c925b 2024-11-25T19:25:51,301 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c89918398c3b4bdc8520252423304944 to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/c89918398c3b4bdc8520252423304944 2024-11-25T19:25:51,303 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/85f2343e65774b68810444bd23b7af46 to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/info/85f2343e65774b68810444bd23b7af46 2024-11-25T19:25:51,314 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/ns/5f2b76b1e7b741de9288aeafdb4aa3a7 is 43, key is default/ns:d/1732562658319/Put/seqid=0 2024-11-25T19:25:51,315 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6ef6ccb75414:34569 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
...
16 more 2024-11-25T19:25:51,320 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c0a5aeca7e9a47b4950ba821163c925b=12509, c89918398c3b4bdc8520252423304944=12509, 85f2343e65774b68810444bd23b7af46=12509] 2024-11-25T19:25:51,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741852_1028 (size=5153) 2024-11-25T19:25:51,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741852_1028 (size=5153) 2024-11-25T19:25:51,327 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/default/TestLogRolling-testSlowSyncLogRolling/3fdd8219a05a24e59832e2faa0413820/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-25T19:25:51,329 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 2024-11-25T19:25:51,329 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3fdd8219a05a24e59832e2faa0413820: Waiting for close lock at 1732562751245Running coprocessor pre-close hooks at 1732562751245Disabling compacts and flushes for region at 1732562751245Disabling writes for close at 1732562751245Obtaining lock to block concurrent updates at 1732562751246 (+1 ms)Preparing flush snapshotting stores in 3fdd8219a05a24e59832e2faa0413820 at 1732562751246Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732562751246Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. at 1732562751247 (+1 ms)Flushing 3fdd8219a05a24e59832e2faa0413820/info: creating writer at 1732562751247Flushing 3fdd8219a05a24e59832e2faa0413820/info: appending metadata at 1732562751251 (+4 ms)Flushing 3fdd8219a05a24e59832e2faa0413820/info: closing flushed file at 1732562751251Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73747456: reopening flushed file at 1732562751273 (+22 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3fdd8219a05a24e59832e2faa0413820 in 40ms, sequenceid=48, compaction requested=true at 1732562751285 (+12 ms)Writing region close event to WAL at 1732562751322 (+37 ms)Running coprocessor post-close hooks at 1732562751327 (+5 ms)Closed at 1732562751329 (+2 ms) 2024-11-25T19:25:51,330 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732562658643.3fdd8219a05a24e59832e2faa0413820. 
2024-11-25T19:25:51,447 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T19:25:51,469 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T19:25:51,470 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T19:25:51,478 INFO [regionserver/6ef6ccb75414:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:25:51,648 DEBUG [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T19:25:51,724 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/ns/5f2b76b1e7b741de9288aeafdb4aa3a7 2024-11-25T19:25:51,753 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/table/3d77ff2a18054d7daf54ca7c199762c5 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732562659225/Put/seqid=0 2024-11-25T19:25:51,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741853_1029 (size=5396) 2024-11-25T19:25:51,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741853_1029 (size=5396) 2024-11-25T19:25:51,760 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/table/3d77ff2a18054d7daf54ca7c199762c5 2024-11-25T19:25:51,769 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/info/f8563fc39f834eec904b3c3ae6a66f86 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/info/f8563fc39f834eec904b3c3ae6a66f86 2024-11-25T19:25:51,777 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/info/f8563fc39f834eec904b3c3ae6a66f86, entries=10, sequenceid=11, filesize=6.9 K 2024-11-25T19:25:51,779 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/ns/5f2b76b1e7b741de9288aeafdb4aa3a7 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/ns/5f2b76b1e7b741de9288aeafdb4aa3a7 2024-11-25T19:25:51,787 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/ns/5f2b76b1e7b741de9288aeafdb4aa3a7, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T19:25:51,788 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/.tmp/table/3d77ff2a18054d7daf54ca7c199762c5 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/table/3d77ff2a18054d7daf54ca7c199762c5 2024-11-25T19:25:51,796 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/table/3d77ff2a18054d7daf54ca7c199762c5, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T19:25:51,798 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 552ms, sequenceid=11, compaction requested=false 2024-11-25T19:25:51,803 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T19:25:51,804 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:25:51,804 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:25:51,805 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562751246Running coprocessor pre-close hooks at 1732562751246Disabling compacts and flushes for region at 1732562751246Disabling writes for close at 1732562751246Obtaining lock to block concurrent updates at 1732562751247 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732562751247Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732562751247Flushing stores of hbase:meta,,1.1588230740 at 1732562751248 (+1 ms)Flushing 1588230740/info: creating writer at 1732562751248Flushing 1588230740/info: appending metadata at 1732562751276 (+28 ms)Flushing 1588230740/info: closing flushed file at 1732562751276Flushing 1588230740/ns: creating writer at 1732562751292 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732562751314 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1732562751314Flushing 1588230740/table: creating writer at 1732562751736 (+422 ms)Flushing 1588230740/table: appending metadata at 1732562751753 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732562751753Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27628a69: reopening flushed file at 1732562751768 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c6654ee: reopening flushed file at 1732562751778 (+10 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12ddf2ef: reopening flushed file at 1732562751787 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 552ms, sequenceid=11, compaction requested=false at 1732562751798 (+11 ms)Writing region close event to WAL at 1732562751799 (+1 ms)Running coprocessor post-close hooks at 1732562751804 (+5 ms)Closed at 1732562751804 2024-11-25T19:25:51,805 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T19:25:51,848 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,46159,1732562655686; all regions closed. 2024-11-25T19:25:51,850 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,850 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,850 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,851 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,851 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741834_1010 (size=3066) 2024-11-25T19:25:51,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741834_1010 (size=3066) 2024-11-25T19:25:51,860 DEBUG [RS:0;6ef6ccb75414:46159 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs 2024-11-25T19:25:51,860 INFO [RS:0;6ef6ccb75414:46159 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C46159%2C1732562655686.meta:.meta(num 1732562658123) 2024-11-25T19:25:51,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:51,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741847_1023 (size=12695) 2024-11-25T19:25:51,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741847_1023 (size=12695) 2024-11-25T19:25:51,869 DEBUG [RS:0;6ef6ccb75414:46159 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/oldWALs 2024-11-25T19:25:51,869 INFO [RS:0;6ef6ccb75414:46159 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C46159%2C1732562655686:(num 1732562731185) 2024-11-25T19:25:51,869 DEBUG [RS:0;6ef6ccb75414:46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:51,869 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:25:51,869 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:25:51,869 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T19:25:51,870 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:25:51,870 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:25:51,870 INFO [RS:0;6ef6ccb75414:46159 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46159 2024-11-25T19:25:51,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:25:51,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,46159,1732562655686 2024-11-25T19:25:51,873 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:25:51,875 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,46159,1732562655686] 2024-11-25T19:25:51,876 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,46159,1732562655686 already deleted, retry=false 2024-11-25T19:25:51,876 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,46159,1732562655686 expired; onlineServers=0 2024-11-25T19:25:51,876 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ef6ccb75414,34569,1732562654897' ***** 2024-11-25T19:25:51,876 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T19:25:51,876 INFO [M:0;6ef6ccb75414:34569 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:25:51,877 INFO [M:0;6ef6ccb75414:34569 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:25:51,877 DEBUG [M:0;6ef6ccb75414:34569 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T19:25:51,877 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T19:25:51,877 DEBUG [M:0;6ef6ccb75414:34569 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T19:25:51,877 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562657327 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562657327,5,FailOnTimeoutGroup] 2024-11-25T19:25:51,877 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562657330 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562657330,5,FailOnTimeoutGroup] 2024-11-25T19:25:51,877 INFO [M:0;6ef6ccb75414:34569 {}] hbase.ChoreService(370): Chore service for: master/6ef6ccb75414:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T19:25:51,877 INFO [M:0;6ef6ccb75414:34569 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:25:51,877 DEBUG [M:0;6ef6ccb75414:34569 {}] master.HMaster(1795): Stopping service threads 2024-11-25T19:25:51,877 INFO [M:0;6ef6ccb75414:34569 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T19:25:51,877 INFO [M:0;6ef6ccb75414:34569 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:25:51,878 INFO [M:0;6ef6ccb75414:34569 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T19:25:51,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T19:25:51,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:51,878 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-25T19:25:51,879 DEBUG [M:0;6ef6ccb75414:34569 {}] zookeeper.ZKUtil(347): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T19:25:51,879 WARN [M:0;6ef6ccb75414:34569 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T19:25:51,879 INFO [M:0;6ef6ccb75414:34569 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/.lastflushedseqids 2024-11-25T19:25:51,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741854_1030 (size=130) 2024-11-25T19:25:51,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741854_1030 (size=130) 2024-11-25T19:25:51,890 INFO [M:0;6ef6ccb75414:34569 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T19:25:51,891 INFO [M:0;6ef6ccb75414:34569 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T19:25:51,891 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:25:51,891 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:51,891 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:51,891 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:25:51,891 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T19:25:51,891 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-25T19:25:51,908 DEBUG [M:0;6ef6ccb75414:34569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7ad1adc0d50c4a7da0894a6bcc64bb07 is 82, key is hbase:meta,,1/info:regioninfo/1732562658213/Put/seqid=0 2024-11-25T19:25:51,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741855_1031 (size=5672) 2024-11-25T19:25:51,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741855_1031 (size=5672) 2024-11-25T19:25:51,915 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7ad1adc0d50c4a7da0894a6bcc64bb07 2024-11-25T19:25:51,936 DEBUG [M:0;6ef6ccb75414:34569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aae0fc9ea5ee4bd78df5f0037e9c0263 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732562659237/Put/seqid=0 2024-11-25T19:25:51,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741856_1032 (size=6247) 2024-11-25T19:25:51,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741856_1032 (size=6247) 2024-11-25T19:25:51,943 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aae0fc9ea5ee4bd78df5f0037e9c0263 2024-11-25T19:25:51,949 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for aae0fc9ea5ee4bd78df5f0037e9c0263 2024-11-25T19:25:51,967 DEBUG [M:0;6ef6ccb75414:34569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8d468b3eefd246c285a3d20fbd1c0545 is 69, key is 6ef6ccb75414,46159,1732562655686/rs:state/1732562657354/Put/seqid=0 2024-11-25T19:25:51,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741857_1033 (size=5156) 2024-11-25T19:25:51,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741857_1033 (size=5156) 2024-11-25T19:25:51,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:51,975 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46159-0x100785770e30001, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:51,976 INFO [RS:0;6ef6ccb75414:46159 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:25:51,976 INFO [RS:0;6ef6ccb75414:46159 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,46159,1732562655686; zookeeper connection closed. 2024-11-25T19:25:51,976 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6bccc6a0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6bccc6a0 2024-11-25T19:25:51,977 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T19:25:52,376 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8d468b3eefd246c285a3d20fbd1c0545 2024-11-25T19:25:52,403 DEBUG [M:0;6ef6ccb75414:34569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/801de0cace7a42e4b74188097979543c is 52, key is load_balancer_on/state:d/1732562658614/Put/seqid=0 2024-11-25T19:25:52,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741858_1034 (size=5056) 2024-11-25T19:25:52,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741858_1034 (size=5056) 2024-11-25T19:25:52,412 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/801de0cace7a42e4b74188097979543c 2024-11-25T19:25:52,420 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7ad1adc0d50c4a7da0894a6bcc64bb07 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7ad1adc0d50c4a7da0894a6bcc64bb07 2024-11-25T19:25:52,427 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7ad1adc0d50c4a7da0894a6bcc64bb07, entries=8, sequenceid=59, filesize=5.5 K 2024-11-25T19:25:52,429 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aae0fc9ea5ee4bd78df5f0037e9c0263 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aae0fc9ea5ee4bd78df5f0037e9c0263 
2024-11-25T19:25:52,436 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for aae0fc9ea5ee4bd78df5f0037e9c0263 2024-11-25T19:25:52,436 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aae0fc9ea5ee4bd78df5f0037e9c0263, entries=6, sequenceid=59, filesize=6.1 K 2024-11-25T19:25:52,438 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8d468b3eefd246c285a3d20fbd1c0545 as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8d468b3eefd246c285a3d20fbd1c0545 2024-11-25T19:25:52,444 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8d468b3eefd246c285a3d20fbd1c0545, entries=1, sequenceid=59, filesize=5.0 K 2024-11-25T19:25:52,445 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/801de0cace7a42e4b74188097979543c as hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/801de0cace7a42e4b74188097979543c 2024-11-25T19:25:52,452 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/801de0cace7a42e4b74188097979543c, entries=1, sequenceid=59, filesize=4.9 K 2024-11-25T19:25:52,453 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=59, compaction requested=false 2024-11-25T19:25:52,455 INFO [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:52,455 DEBUG [M:0;6ef6ccb75414:34569 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562751891Disabling compacts and flushes for region at 1732562751891Disabling writes for close at 1732562751891Obtaining lock to block concurrent updates at 1732562751891Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732562751891Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732562751891Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732562751892 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732562751892Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732562751907 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732562751907Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732562751921 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732562751936 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732562751936Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732562751950 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732562751966 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732562751966Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732562752389 (+423 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732562752402 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732562752402Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a683506: reopening flushed file at 1732562752418 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26cf9752: reopening flushed file at 1732562752428 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@511335a8: reopening flushed file at 1732562752436 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ef4606f: reopening flushed file at 1732562752444 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=59, compaction requested=false at 1732562752453 (+9 ms)Writing region close event to WAL at 1732562752455 (+2 ms)Closed at 1732562752455 2024-11-25T19:25:52,456 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:52,456 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:52,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:52,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:52,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:52,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44037 is added to blk_1073741830_1006 (size=27973) 2024-11-25T19:25:52,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41765 is added to blk_1073741830_1006 (size=27973) 2024-11-25T19:25:52,459 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:25:52,459 INFO [M:0;6ef6ccb75414:34569 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-25T19:25:52,460 INFO [M:0;6ef6ccb75414:34569 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34569 2024-11-25T19:25:52,460 INFO [M:0;6ef6ccb75414:34569 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:25:52,561 INFO [M:0;6ef6ccb75414:34569 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:25:52,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:52,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34569-0x100785770e30000, quorum=127.0.0.1:60717, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:52,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:52,568 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:25:52,568 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:25:52,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:25:52,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir/,STOPPED} 2024-11-25T19:25:52,571 WARN [BP-1956332910-172.17.0.2-1732562652023 heartbeating to localhost/127.0.0.1:35499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:25:52,571 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:25:52,571 WARN [BP-1956332910-172.17.0.2-1732562652023 heartbeating to localhost/127.0.0.1:35499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1956332910-172.17.0.2-1732562652023 (Datanode Uuid 01c92f29-cfbd-4131-9d6d-e5c27e28e3b7) service to localhost/127.0.0.1:35499 2024-11-25T19:25:52,571 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:25:52,572 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data3/current/BP-1956332910-172.17.0.2-1732562652023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:52,572 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data4/current/BP-1956332910-172.17.0.2-1732562652023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:52,573 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:25:52,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:52,576 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:25:52,576 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:25:52,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:25:52,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir/,STOPPED} 2024-11-25T19:25:52,578 WARN [BP-1956332910-172.17.0.2-1732562652023 heartbeating to localhost/127.0.0.1:35499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:25:52,578 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:25:52,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:25:52,578 WARN [BP-1956332910-172.17.0.2-1732562652023 heartbeating to localhost/127.0.0.1:35499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1956332910-172.17.0.2-1732562652023 (Datanode Uuid 21fec8a6-fc95-48fd-8081-770a0bdcffc0) service to localhost/127.0.0.1:35499 2024-11-25T19:25:52,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data1/current/BP-1956332910-172.17.0.2-1732562652023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:52,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/cluster_6c29350f-7e7e-fdb0-c988-af83e7e25fff/data/data2/current/BP-1956332910-172.17.0.2-1732562652023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:52,580 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:25:52,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:25:52,591 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:25:52,591 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:25:52,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:25:52,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir/,STOPPED} 2024-11-25T19:25:52,600 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T19:25:52,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T19:25:52,643 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35499 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35499 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35499 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35499 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35499 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35499 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/6ef6ccb75414:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/6ef6ccb75414:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/6ef6ccb75414:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35499 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35499 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@60424bca java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) 
app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=178 (was 254), ProcessCount=11 (was 11), AvailableMemoryMB=5802 (was 7215) 2024-11-25T19:25:52,650 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=178, ProcessCount=11, AvailableMemoryMB=5801 2024-11-25T19:25:52,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T19:25:52,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.log.dir so I do NOT create it in target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02 2024-11-25T19:25:52,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36123f8e-c060-a54b-d799-f7945478b9b2/hadoop.tmp.dir so I do NOT create it in target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02 2024-11-25T19:25:52,651 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc, deleteOnExit=true 2024-11-25T19:25:52,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T19:25:52,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/test.cache.data in system properties and HBase conf 2024-11-25T19:25:52,651 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T19:25:52,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir in system properties and HBase conf 2024-11-25T19:25:52,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T19:25:52,652 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:25:52,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:25:52,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T19:25:52,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/nfs.dump.dir in system properties and HBase conf 2024-11-25T19:25:52,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/java.io.tmpdir in system properties and HBase conf 2024-11-25T19:25:52,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:25:52,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T19:25:52,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T19:25:52,666 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:25:52,716 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:52,722 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:25:52,723 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:25:52,723 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:25:52,723 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:25:52,724 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:52,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:25:52,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:25:52,828 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@511dc70f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/java.io.tmpdir/jetty-localhost-40339-hadoop-hdfs-3_4_1-tests_jar-_-any-2409865773169755006/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:25:52,829 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:40339} 2024-11-25T19:25:52,829 INFO [Time-limited test {}] server.Server(415): Started @103417ms 2024-11-25T19:25:52,842 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:25:52,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:52,901 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:25:52,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:25:52,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:25:52,902 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:25:52,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:25:52,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:25:52,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d4bdc00{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/java.io.tmpdir/jetty-localhost-44077-hadoop-hdfs-3_4_1-tests_jar-_-any-6233956579300901464/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:52,998 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:44077} 2024-11-25T19:25:52,998 INFO [Time-limited test {}] server.Server(415): Started @103587ms 2024-11-25T19:25:53,000 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:25:53,038 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:53,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:25:53,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:25:53,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:25:53,044 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:25:53,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:25:53,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:25:53,065 WARN [Thread-435 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data1/current/BP-234184799-172.17.0.2-1732562752678/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:53,065 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data2/current/BP-234184799-172.17.0.2-1732562752678/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:53,082 WARN [Thread-414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:25:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94b4cc9075999de4 with lease ID 0x225a49161454f33d: Processing first storage report for DS-f4265321-e5c9-491c-89a6-1e02e1319373 from datanode DatanodeRegistration(127.0.0.1:38643, datanodeUuid=ceb72cc6-2142-4acb-8f9a-3958fef348c6, infoPort=39961, infoSecurePort=0, ipcPort=42005, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678) 2024-11-25T19:25:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94b4cc9075999de4 with lease ID 0x225a49161454f33d: from storage DS-f4265321-e5c9-491c-89a6-1e02e1319373 node DatanodeRegistration(127.0.0.1:38643, datanodeUuid=ceb72cc6-2142-4acb-8f9a-3958fef348c6, infoPort=39961, infoSecurePort=0, ipcPort=42005, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:25:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94b4cc9075999de4 with lease ID 0x225a49161454f33d: Processing first storage report for DS-35b23f67-64e7-4316-ab0f-5fa84563382c from datanode DatanodeRegistration(127.0.0.1:38643, datanodeUuid=ceb72cc6-2142-4acb-8f9a-3958fef348c6, infoPort=39961, infoSecurePort=0, ipcPort=42005, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678) 2024-11-25T19:25:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94b4cc9075999de4 with lease ID 0x225a49161454f33d: from storage DS-35b23f67-64e7-4316-ab0f-5fa84563382c node DatanodeRegistration(127.0.0.1:38643, datanodeUuid=ceb72cc6-2142-4acb-8f9a-3958fef348c6, infoPort=39961, infoSecurePort=0, ipcPort=42005, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:25:53,145 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@824b6ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/java.io.tmpdir/jetty-localhost-41379-hadoop-hdfs-3_4_1-tests_jar-_-any-17141210916941709195/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:53,145 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:41379} 2024-11-25T19:25:53,145 INFO [Time-limited test {}] server.Server(415): Started @103734ms 2024-11-25T19:25:53,147 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
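The DataNode web UIs, block-pool slices, and first block reports above come from an in-process HDFS mini cluster started by the test harness. As a rough illustration only (the harness in this log goes through HBaseTestingUtil, so calling Hadoop's MiniDFSCluster builder directly is an assumption about the underlying mechanism, not this test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two DataNodes, as in the cluster being started in this log.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        cluster.waitActive();                    // block until the DataNodes have registered
        FileSystem fs = cluster.getFileSystem(); // filesystem backed by the mini cluster
        System.out.println("mini HDFS at " + fs.getUri());
        cluster.shutdown();
      }
    }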
2024-11-25T19:25:53,212 WARN [Thread-461 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data3/current/BP-234184799-172.17.0.2-1732562752678/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:53,212 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data4/current/BP-234184799-172.17.0.2-1732562752678/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:53,231 WARN [Thread-450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:25:53,234 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d3ec54fef202827 with lease ID 0x225a49161454f33e: Processing first storage report for DS-c5f6105b-5f80-468a-ab92-eb8fc57ff309 from datanode DatanodeRegistration(127.0.0.1:42843, datanodeUuid=16bf159f-3f22-4c2d-a534-7a26af8a5f1e, infoPort=46281, infoSecurePort=0, ipcPort=44995, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678) 2024-11-25T19:25:53,234 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d3ec54fef202827 with lease ID 0x225a49161454f33e: from storage DS-c5f6105b-5f80-468a-ab92-eb8fc57ff309 node DatanodeRegistration(127.0.0.1:42843, datanodeUuid=16bf159f-3f22-4c2d-a534-7a26af8a5f1e, infoPort=46281, infoSecurePort=0, ipcPort=44995, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:25:53,235 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d3ec54fef202827 with lease ID 0x225a49161454f33e: Processing first storage report for DS-dbb84f9b-f379-4a15-ac07-905230d88c35 from datanode DatanodeRegistration(127.0.0.1:42843, datanodeUuid=16bf159f-3f22-4c2d-a534-7a26af8a5f1e, infoPort=46281, infoSecurePort=0, ipcPort=44995, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678) 2024-11-25T19:25:53,235 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d3ec54fef202827 with lease ID 0x225a49161454f33e: from storage DS-dbb84f9b-f379-4a15-ac07-905230d88c35 node DatanodeRegistration(127.0.0.1:42843, datanodeUuid=16bf159f-3f22-4c2d-a534-7a26af8a5f1e, infoPort=46281, infoSecurePort=0, ipcPort=44995, storageInfo=lv=-57;cid=testClusterID;nsid=1136297964;c=1732562752678), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:25:53,275 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02 2024-11-25T19:25:53,278 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/zookeeper_0, clientPort=53135, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T19:25:53,281 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53135 2024-11-25T19:25:53,281 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:53,283 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:53,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:25:53,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:25:53,297 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817 with version=8 2024-11-25T19:25:53,297 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase-staging 2024-11-25T19:25:53,299 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:25:53,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:53,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:53,300 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:25:53,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:53,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:25:53,300 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T19:25:53,300 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:25:53,301 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35063 2024-11-25T19:25:53,303 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35063 connecting to ZooKeeper ensemble=127.0.0.1:53135 2024-11-25T19:25:53,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350630x0, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:25:53,307 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35063-0x1007858f36c0000 connected 2024-11-25T19:25:53,318 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:53,321 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:53,324 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:53,325 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817, hbase.cluster.distributed=false 2024-11-25T19:25:53,327 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:25:53,328 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35063 2024-11-25T19:25:53,328 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35063 2024-11-25T19:25:53,328 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35063 2024-11-25T19:25:53,329 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35063 2024-11-25T19:25:53,329 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35063 2024-11-25T19:25:53,346 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:25:53,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:53,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:53,347 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:25:53,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:53,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:25:53,347 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:25:53,347 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:25:53,348 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35415 2024-11-25T19:25:53,349 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35415 connecting to ZooKeeper ensemble=127.0.0.1:53135 2024-11-25T19:25:53,350 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:53,352 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:53,356 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354150x0, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:25:53,356 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:354150x0, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:53,356 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35415-0x1007858f36c0001 connected 2024-11-25T19:25:53,357 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:25:53,359 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:25:53,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T19:25:53,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:25:53,363 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35415 2024-11-25T19:25:53,364 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35415 2024-11-25T19:25:53,365 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35415 2024-11-25T19:25:53,365 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35415 2024-11-25T19:25:53,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35415 2024-11-25T19:25:53,381 
DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ef6ccb75414:35063 2024-11-25T19:25:53,382 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:53,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:53,383 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:53,384 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:53,385 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T19:25:53,385 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,385 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:25:53,386 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ef6ccb75414,35063,1732562753299 from backup master directory 2024-11-25T19:25:53,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:53,387 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:53,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:53,387 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
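The ZKWatcher and ZKUtil records above show the master (port 35063) and region server (port 35415) sessions connecting to the ensemble at 127.0.0.1:53135 and setting watches on znodes such as /hbase/running and /hbase/master, some of which do not exist yet. HBase wraps this in RecoverableZooKeeper/ZKWatcher; a bare-bones sketch of the underlying ZooKeeper client calls (illustrative only, not HBase's wrapper code):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          // Fires for SyncConnected as well as NodeCreated / NodeChildrenChanged,
          // matching the "Received ZooKeeper Event" lines above.
          System.out.println("type=" + event.getType() + " state=" + event.getState() + " path=" + event.getPath());
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53135", 30000, watcher);
        connected.await();
        // exists() with watch=true registers a watch even when the znode is absent,
        // which is what "Set watcher on znode that does not yet exist" refers to.
        zk.exists("/hbase/running", true);
        zk.close();
      }
    }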
2024-11-25T19:25:53,387 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:53,395 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/hbase.id] with ID: 2e0d4155-5b31-4c7a-826c-f4c7fd53e916 2024-11-25T19:25:53,395 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/.tmp/hbase.id 2024-11-25T19:25:53,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:25:53,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:25:53,405 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/.tmp/hbase.id]:[hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/hbase.id] 2024-11-25T19:25:53,419 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:53,419 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T19:25:53,421 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
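The FSUtils records above write the new cluster ID to a .tmp location first and then move it onto hbase.id, so a reader never observes a half-written ID file. A minimal sketch of that write-then-rename pattern with the plain Hadoop FileSystem API (paths and the UUID payload are illustrative; this shows the general pattern, not the actual FSUtils implementation):

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/hbase/.tmp/hbase.id");
        Path dst = new Path("/hbase/hbase.id");
        // Write the content to a temporary location first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // ...then publish it with a single rename, so readers see either the old file
        // or the new one, never a partial write.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }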
2024-11-25T19:25:53,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,423 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:25:53,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:25:53,431 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:25:53,432 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T19:25:53,432 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:25:53,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:25:53,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:25:53,445 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store 2024-11-25T19:25:53,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:25:53,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:25:53,454 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:53,455 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:25:53,455 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:53,455 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:53,455 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:25:53,455 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:53,455 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
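The master:store descriptor logged above defines four column families (info, proc, rs, state) with per-family settings such as VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE. That region is internal to the master and is not created through the client API, but the logged attributes correspond directly to the public descriptor builders; a short sketch for the 'info' and 'proc' families, purely to show what those attributes mean (building this descriptor yourself does not create the master store):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info': 3 versions, ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc': 1 version, ROW bloom filter, default encoding, 64 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
        System.out.println(td);
      }
    }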
2024-11-25T19:25:53,455 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562753454Disabling compacts and flushes for region at 1732562753454Disabling writes for close at 1732562753455 (+1 ms)Writing region close event to WAL at 1732562753455Closed at 1732562753455 2024-11-25T19:25:53,456 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/.initializing 2024-11-25T19:25:53,456 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/WALs/6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:53,459 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C35063%2C1732562753299, suffix=, logDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/WALs/6ef6ccb75414,35063,1732562753299, archiveDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/oldWALs, maxLogs=10 2024-11-25T19:25:53,460 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35063%2C1732562753299.1732562753460 2024-11-25T19:25:53,465 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/WALs/6ef6ccb75414,35063,1732562753299/6ef6ccb75414%2C35063%2C1732562753299.1732562753460 2024-11-25T19:25:53,466 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39961:39961),(127.0.0.1/127.0.0.1:46281:46281)] 2024-11-25T19:25:53,467 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:25:53,467 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:53,467 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,467 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T19:25:53,471 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:53,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T19:25:53,474 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:25:53,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T19:25:53,477 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:25:53,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T19:25:53,480 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:25:53,481 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,482 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,482 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,484 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,484 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,484 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T19:25:53,486 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:53,488 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:25:53,488 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757846, jitterRate=-0.03635020554065704}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T19:25:53,489 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732562753467Initializing all the Stores at 1732562753469 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562753469Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562753469Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562753469Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562753469Cleaning up temporary data from old regions at 1732562753484 (+15 ms)Region opened successfully at 1732562753489 (+5 ms) 2024-11-25T19:25:53,489 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T19:25:53,493 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58bee6b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:25:53,494 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T19:25:53,495 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T19:25:53,495 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T19:25:53,495 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T19:25:53,495 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T19:25:53,496 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T19:25:53,496 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T19:25:53,498 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T19:25:53,499 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T19:25:53,500 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T19:25:53,501 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T19:25:53,501 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T19:25:53,502 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T19:25:53,502 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T19:25:53,504 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T19:25:53,505 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T19:25:53,506 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T19:25:53,507 DEBUG 
[master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T19:25:53,509 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T19:25:53,510 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T19:25:53,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:53,511 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:53,511 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,511 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ef6ccb75414,35063,1732562753299, sessionid=0x1007858f36c0000, setting cluster-up flag (Was=false) 2024-11-25T19:25:53,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,513 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,516 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T19:25:53,517 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:53,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,520 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,523 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T19:25:53,525 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:53,526 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T19:25:53,528 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:53,528 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T19:25:53,528 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T19:25:53,528 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ef6ccb75414,35063,1732562753299 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ef6ccb75414:0, corePoolSize=10, maxPoolSize=10 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:25:53,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732562783531 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T19:25:53,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,532 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T19:25:53,532 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:53,532 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T19:25:53,532 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T19:25:53,532 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T19:25:53,532 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T19:25:53,532 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T19:25:53,533 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562753533,5,FailOnTimeoutGroup] 2024-11-25T19:25:53,533 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562753533,5,FailOnTimeoutGroup] 2024-11-25T19:25:53,533 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,533 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T19:25:53,533 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,533 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,533 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,533 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T19:25:53,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:25:53,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:25:53,542 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T19:25:53,543 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817 2024-11-25T19:25:53,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:25:53,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:25:53,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:53,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:25:53,558 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:25:53,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:53,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:25:53,561 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:25:53,561 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:53,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:25:53,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:25:53,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:53,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:25:53,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:25:53,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:53,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:53,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:25:53,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740 2024-11-25T19:25:53,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740 2024-11-25T19:25:53,569 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(746): ClusterId : 2e0d4155-5b31-4c7a-826c-f4c7fd53e916 2024-11-25T19:25:53,569 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:25:53,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:25:53,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:25:53,571 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:25:53,571 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:25:53,571 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T19:25:53,573 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:25:53,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:25:53,573 DEBUG [RS:0;6ef6ccb75414:35415 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f0a0b85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:25:53,576 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:25:53,576 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762597, jitterRate=-0.030308842658996582}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:25:53,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732562753553Initializing all the Stores at 1732562753555 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562753555Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562753555Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562753555Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562753555Cleaning up temporary data from old regions at 1732562753571 (+16 ms)Region opened successfully at 1732562753577 (+6 ms) 2024-11-25T19:25:53,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:25:53,578 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:25:53,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:25:53,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-11-25T19:25:53,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:25:53,579 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:25:53,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562753578Disabling compacts and flushes for region at 1732562753578Disabling writes for close at 1732562753578Writing region close event to WAL at 1732562753579 (+1 ms)Closed at 1732562753579 2024-11-25T19:25:53,580 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:53,581 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T19:25:53,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T19:25:53,583 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:25:53,585 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T19:25:53,586 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ef6ccb75414:35415 2024-11-25T19:25:53,587 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:25:53,587 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:25:53,587 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T19:25:53,588 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,35063,1732562753299 with port=35415, startcode=1732562753346 2024-11-25T19:25:53,588 DEBUG [RS:0;6ef6ccb75414:35415 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:25:53,590 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57717, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:25:53,591 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35063 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:53,591 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35063 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:53,593 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817 2024-11-25T19:25:53,593 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33701 2024-11-25T19:25:53,593 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:25:53,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:25:53,595 DEBUG [RS:0;6ef6ccb75414:35415 {}] zookeeper.ZKUtil(111): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:53,595 WARN [RS:0;6ef6ccb75414:35415 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:25:53,595 INFO [RS:0;6ef6ccb75414:35415 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:25:53,595 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/WALs/6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:53,596 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,35415,1732562753346] 2024-11-25T19:25:53,600 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:25:53,603 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:25:53,603 INFO [RS:0;6ef6ccb75414:35415 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:25:53,603 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T19:25:53,603 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:25:53,604 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T19:25:53,604 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,604 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:53,605 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:25:53,606 DEBUG [RS:0;6ef6ccb75414:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:25:53,609 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T19:25:53,609 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,609 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,609 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,610 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,610 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35415,1732562753346-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:25:53,623 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:25:53,624 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35415,1732562753346-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,624 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,624 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.Replication(171): 6ef6ccb75414,35415,1732562753346 started 2024-11-25T19:25:53,638 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:53,638 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,35415,1732562753346, RpcServer on 6ef6ccb75414/172.17.0.2:35415, sessionid=0x1007858f36c0001 2024-11-25T19:25:53,638 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:25:53,638 DEBUG [RS:0;6ef6ccb75414:35415 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:53,638 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,35415,1732562753346' 2024-11-25T19:25:53,638 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:25:53,639 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:25:53,640 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:25:53,640 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:25:53,640 DEBUG [RS:0;6ef6ccb75414:35415 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:53,640 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,35415,1732562753346' 2024-11-25T19:25:53,640 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:25:53,640 DEBUG 
[RS:0;6ef6ccb75414:35415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:25:53,641 DEBUG [RS:0;6ef6ccb75414:35415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:25:53,641 INFO [RS:0;6ef6ccb75414:35415 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:25:53,641 INFO [RS:0;6ef6ccb75414:35415 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T19:25:53,735 WARN [6ef6ccb75414:35063 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T19:25:53,743 INFO [RS:0;6ef6ccb75414:35415 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C35415%2C1732562753346, suffix=, logDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/WALs/6ef6ccb75414,35415,1732562753346, archiveDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/oldWALs, maxLogs=32 2024-11-25T19:25:53,745 INFO [RS:0;6ef6ccb75414:35415 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35415%2C1732562753346.1732562753745 2024-11-25T19:25:53,753 INFO [RS:0;6ef6ccb75414:35415 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/WALs/6ef6ccb75414,35415,1732562753346/6ef6ccb75414%2C35415%2C1732562753346.1732562753745 2024-11-25T19:25:53,758 DEBUG [RS:0;6ef6ccb75414:35415 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46281:46281),(127.0.0.1/127.0.0.1:39961:39961)] 2024-11-25T19:25:53,985 DEBUG [6ef6ccb75414:35063 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T19:25:53,986 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:53,989 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,35415,1732562753346, state=OPENING 2024-11-25T19:25:53,992 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T19:25:53,994 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:53,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:53,995 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:25:53,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:53,996 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,35415,1732562753346}] 2024-11-25T19:25:54,152 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T19:25:54,156 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41817, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T19:25:54,165 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T19:25:54,165 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:25:54,168 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C35415%2C1732562753346.meta, suffix=.meta, logDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/WALs/6ef6ccb75414,35415,1732562753346, archiveDir=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/oldWALs, maxLogs=32 2024-11-25T19:25:54,171 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35415%2C1732562753346.meta.1732562754171.meta 2024-11-25T19:25:54,180 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/WALs/6ef6ccb75414,35415,1732562753346/6ef6ccb75414%2C35415%2C1732562753346.meta.1732562754171.meta 2024-11-25T19:25:54,189 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39961:39961),(127.0.0.1/127.0.0.1:46281:46281)] 2024-11-25T19:25:54,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:25:54,194 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T19:25:54,194 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T19:25:54,194 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T19:25:54,194 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T19:25:54,194 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:54,195 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T19:25:54,195 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T19:25:54,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:25:54,200 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:25:54,200 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:54,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:54,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:25:54,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:25:54,203 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:54,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:54,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:25:54,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:25:54,205 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:54,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:54,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:25:54,208 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:25:54,208 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:54,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T19:25:54,209 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:25:54,210 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740 2024-11-25T19:25:54,212 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740 2024-11-25T19:25:54,213 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:25:54,214 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:25:54,215 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T19:25:54,218 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:25:54,220 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737322, jitterRate=-0.06244675815105438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:25:54,220 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T19:25:54,222 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732562754195Writing region info on filesystem at 1732562754195Initializing all the Stores at 1732562754197 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562754197Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562754198 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562754198Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562754198Cleaning up temporary data from old regions at 1732562754214 (+16 ms)Running coprocessor post-open hooks at 1732562754220 (+6 ms)Region opened successfully at 1732562754222 (+2 ms) 2024-11-25T19:25:54,224 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732562754152 2024-11-25T19:25:54,228 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T19:25:54,228 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T19:25:54,230 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:54,232 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,35415,1732562753346, state=OPEN 2024-11-25T19:25:54,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:25:54,235 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:25:54,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:54,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:54,237 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:54,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T19:25:54,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,35415,1732562753346 in 241 msec 2024-11-25T19:25:54,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T19:25:54,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 660 msec 2024-11-25T19:25:54,245 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:54,246 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T19:25:54,247 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:25:54,247 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,35415,1732562753346, seqNum=-1] 2024-11-25T19:25:54,248 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:25:54,249 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46515, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:25:54,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 728 msec 2024-11-25T19:25:54,257 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732562754257, completionTime=-1 2024-11-25T19:25:54,257 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T19:25:54,257 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732562814260 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732562874260 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35063,1732562753299-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35063,1732562753299-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35063,1732562753299-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ef6ccb75414:35063, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:25:54,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:54,261 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:54,263 DEBUG [master/6ef6ccb75414:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.879sec 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35063,1732562753299-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:25:54,266 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35063,1732562753299-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T19:25:54,270 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T19:25:54,270 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T19:25:54,270 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ffb657f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:25:54,270 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35063,1732562753299-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:25:54,270 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ef6ccb75414,35063,-1 for getting cluster id 2024-11-25T19:25:54,270 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T19:25:54,273 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2e0d4155-5b31-4c7a-826c-f4c7fd53e916' 2024-11-25T19:25:54,274 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T19:25:54,274 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2e0d4155-5b31-4c7a-826c-f4c7fd53e916" 2024-11-25T19:25:54,275 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aebd01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:25:54,275 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ef6ccb75414,35063,-1] 2024-11-25T19:25:54,275 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T19:25:54,277 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:54,278 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49886, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T19:25:54,280 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5d9cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:25:54,280 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:25:54,282 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,35415,1732562753346, seqNum=-1] 2024-11-25T19:25:54,282 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:25:54,284 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36984, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:25:54,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:54,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:54,291 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T19:25:54,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T19:25:54,291 INFO [Time-limited 
test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T19:25:54,292 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:25:54,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:54,292 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:54,292 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T19:25:54,292 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T19:25:54,292 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=172714076, stopped=false 2024-11-25T19:25:54,292 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ef6ccb75414,35063,1732562753299 2024-11-25T19:25:54,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:54,293 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:54,293 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:25:54,293 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:54,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:54,294 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T19:25:54,294 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:54,294 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:54,294 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:25:54,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:54,295 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,35415,1732562753346' ***** 2024-11-25T19:25:54,295 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:25:54,295 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:25:54,295 INFO [RS:0;6ef6ccb75414:35415 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:25:54,295 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:25:54,295 INFO [RS:0;6ef6ccb75414:35415 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T19:25:54,295 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:54,295 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:25:54,296 INFO [RS:0;6ef6ccb75414:35415 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ef6ccb75414:35415. 2024-11-25T19:25:54,296 DEBUG [RS:0;6ef6ccb75414:35415 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:25:54,296 DEBUG [RS:0;6ef6ccb75414:35415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:54,296 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-25T19:25:54,296 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T19:25:54,296 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T19:25:54,296 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T19:25:54,296 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T19:25:54,296 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-25T19:25:54,296 DEBUG [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T19:25:54,296 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:25:54,297 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:25:54,297 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:25:54,297 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:25:54,297 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:25:54,297 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-25T19:25:54,316 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740/.tmp/ns/1b9e7840b2ea4e9eb7d54f8c2cdeb8a9 is 43, key is default/ns:d/1732562754250/Put/seqid=0 2024-11-25T19:25:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741835_1011 (size=5153) 2024-11-25T19:25:54,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741835_1011 (size=5153) 2024-11-25T19:25:54,323 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740/.tmp/ns/1b9e7840b2ea4e9eb7d54f8c2cdeb8a9 2024-11-25T19:25:54,333 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740/.tmp/ns/1b9e7840b2ea4e9eb7d54f8c2cdeb8a9 as hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740/ns/1b9e7840b2ea4e9eb7d54f8c2cdeb8a9 2024-11-25T19:25:54,341 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740/ns/1b9e7840b2ea4e9eb7d54f8c2cdeb8a9, entries=2, sequenceid=6, filesize=5.0 K 2024-11-25T19:25:54,343 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false 2024-11-25T19:25:54,343 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T19:25:54,349 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-25T19:25:54,350 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:25:54,350 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:25:54,350 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562754296Running coprocessor pre-close hooks at 1732562754296Disabling compacts and flushes for region at 1732562754296Disabling writes for close at 1732562754297 (+1 ms)Obtaining lock to block concurrent updates at 1732562754297Preparing flush snapshotting stores in 1588230740 at 1732562754297Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732562754298 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732562754299 (+1 ms)Flushing 1588230740/ns: creating writer at 1732562754299Flushing 1588230740/ns: appending metadata at 1732562754315 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732562754315Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e2e894e: reopening flushed file at 1732562754332 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false at 1732562754343 (+11 ms)Writing region close event to WAL at 1732562754344 (+1 ms)Running coprocessor post-close hooks at 1732562754350 (+6 ms)Closed at 1732562754350 2024-11-25T19:25:54,350 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T19:25:54,497 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,35415,1732562753346; all regions closed. 
2024-11-25T19:25:54,501 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,501 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,502 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,502 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,502 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741834_1010 (size=1152) 2024-11-25T19:25:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741834_1010 (size=1152) 2024-11-25T19:25:54,508 DEBUG [RS:0;6ef6ccb75414:35415 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/oldWALs 2024-11-25T19:25:54,508 INFO [RS:0;6ef6ccb75414:35415 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C35415%2C1732562753346.meta:.meta(num 1732562754171) 2024-11-25T19:25:54,509 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,509 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,509 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,509 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741833_1009 (size=93) 2024-11-25T19:25:54,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741833_1009 (size=93) 2024-11-25T19:25:54,514 DEBUG [RS:0;6ef6ccb75414:35415 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/oldWALs 2024-11-25T19:25:54,514 INFO [RS:0;6ef6ccb75414:35415 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C35415%2C1732562753346:(num 1732562753745) 2024-11-25T19:25:54,515 DEBUG [RS:0;6ef6ccb75414:35415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:54,515 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:25:54,515 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:25:54,515 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T19:25:54,515 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:25:54,515 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T19:25:54,515 INFO [RS:0;6ef6ccb75414:35415 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35415 2024-11-25T19:25:54,517 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,35415,1732562753346 2024-11-25T19:25:54,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:25:54,517 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:25:54,518 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,35415,1732562753346] 2024-11-25T19:25:54,518 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,35415,1732562753346 already deleted, retry=false 2024-11-25T19:25:54,519 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,35415,1732562753346 expired; onlineServers=0 2024-11-25T19:25:54,519 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ef6ccb75414,35063,1732562753299' ***** 2024-11-25T19:25:54,519 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T19:25:54,519 INFO [M:0;6ef6ccb75414:35063 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:25:54,519 INFO [M:0;6ef6ccb75414:35063 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:25:54,519 DEBUG [M:0;6ef6ccb75414:35063 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T19:25:54,519 DEBUG [M:0;6ef6ccb75414:35063 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T19:25:54,519 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T19:25:54,519 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562753533 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562753533,5,FailOnTimeoutGroup] 2024-11-25T19:25:54,519 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562753533 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562753533,5,FailOnTimeoutGroup] 2024-11-25T19:25:54,519 INFO [M:0;6ef6ccb75414:35063 {}] hbase.ChoreService(370): Chore service for: master/6ef6ccb75414:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T19:25:54,519 INFO [M:0;6ef6ccb75414:35063 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:25:54,519 DEBUG [M:0;6ef6ccb75414:35063 {}] master.HMaster(1795): Stopping service threads 2024-11-25T19:25:54,519 INFO [M:0;6ef6ccb75414:35063 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T19:25:54,520 INFO [M:0;6ef6ccb75414:35063 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:25:54,520 INFO [M:0;6ef6ccb75414:35063 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T19:25:54,520 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T19:25:54,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T19:25:54,520 DEBUG [M:0;6ef6ccb75414:35063 {}] zookeeper.ZKUtil(347): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T19:25:54,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:54,520 WARN [M:0;6ef6ccb75414:35063 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T19:25:54,521 INFO [M:0;6ef6ccb75414:35063 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/.lastflushedseqids 2024-11-25T19:25:54,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741836_1012 (size=99) 2024-11-25T19:25:54,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741836_1012 (size=99) 2024-11-25T19:25:54,532 INFO [M:0;6ef6ccb75414:35063 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T19:25:54,532 INFO [M:0;6ef6ccb75414:35063 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T19:25:54,532 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:25:54,532 INFO [M:0;6ef6ccb75414:35063 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:54,532 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:54,532 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:25:54,532 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:54,532 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-25T19:25:54,549 DEBUG [M:0;6ef6ccb75414:35063 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca352d7d3fd142669295b7d3daab9397 is 82, key is hbase:meta,,1/info:regioninfo/1732562754229/Put/seqid=0 2024-11-25T19:25:54,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741837_1013 (size=5672) 2024-11-25T19:25:54,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741837_1013 (size=5672) 2024-11-25T19:25:54,555 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca352d7d3fd142669295b7d3daab9397 2024-11-25T19:25:54,578 DEBUG [M:0;6ef6ccb75414:35063 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/627dd79193f94e79afb81bef5e980690 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732562754256/Put/seqid=0 2024-11-25T19:25:54,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741838_1014 (size=5275) 2024-11-25T19:25:54,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741838_1014 (size=5275) 2024-11-25T19:25:54,585 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/627dd79193f94e79afb81bef5e980690 2024-11-25T19:25:54,605 DEBUG [M:0;6ef6ccb75414:35063 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b184a0febee4028bde75d9f5c6d6da0 is 69, key is 6ef6ccb75414,35415,1732562753346/rs:state/1732562753591/Put/seqid=0 2024-11-25T19:25:54,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741839_1015 (size=5156) 2024-11-25T19:25:54,610 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741839_1015 (size=5156) 2024-11-25T19:25:54,611 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b184a0febee4028bde75d9f5c6d6da0 2024-11-25T19:25:54,618 INFO [RS:0;6ef6ccb75414:35415 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:25:54,618 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:54,618 INFO [RS:0;6ef6ccb75414:35415 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,35415,1732562753346; zookeeper connection closed. 2024-11-25T19:25:54,618 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x1007858f36c0001, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:54,618 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ca141 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ca141 2024-11-25T19:25:54,618 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T19:25:54,632 DEBUG [M:0;6ef6ccb75414:35063 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab5c3917cfb94481be3922b326f02a28 is 52, key is load_balancer_on/state:d/1732562754290/Put/seqid=0 2024-11-25T19:25:54,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741840_1016 (size=5056) 2024-11-25T19:25:54,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741840_1016 (size=5056) 2024-11-25T19:25:54,639 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab5c3917cfb94481be3922b326f02a28 2024-11-25T19:25:54,645 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca352d7d3fd142669295b7d3daab9397 as hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ca352d7d3fd142669295b7d3daab9397 2024-11-25T19:25:54,653 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ca352d7d3fd142669295b7d3daab9397, entries=8, sequenceid=29, filesize=5.5 K 2024-11-25T19:25:54,654 DEBUG [M:0;6ef6ccb75414:35063 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/627dd79193f94e79afb81bef5e980690 as hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/627dd79193f94e79afb81bef5e980690 2024-11-25T19:25:54,663 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/627dd79193f94e79afb81bef5e980690, entries=3, sequenceid=29, filesize=5.2 K 2024-11-25T19:25:54,665 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b184a0febee4028bde75d9f5c6d6da0 as hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b184a0febee4028bde75d9f5c6d6da0 2024-11-25T19:25:54,672 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b184a0febee4028bde75d9f5c6d6da0, entries=1, sequenceid=29, filesize=5.0 K 2024-11-25T19:25:54,674 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab5c3917cfb94481be3922b326f02a28 as hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ab5c3917cfb94481be3922b326f02a28 2024-11-25T19:25:54,683 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33701/user/jenkins/test-data/c24893a4-2dbd-9e78-7004-3b7a4a98f817/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ab5c3917cfb94481be3922b326f02a28, entries=1, sequenceid=29, filesize=4.9 K 2024-11-25T19:25:54,684 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=29, compaction requested=false 2024-11-25T19:25:54,690 INFO [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:54,690 DEBUG [M:0;6ef6ccb75414:35063 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562754532Disabling compacts and flushes for region at 1732562754532Disabling writes for close at 1732562754532Obtaining lock to block concurrent updates at 1732562754532Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732562754532Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732562754533 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732562754534 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732562754534Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732562754549 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732562754549Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732562754562 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732562754578 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732562754578Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732562754591 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732562754604 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732562754604Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732562754617 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732562754632 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732562754632Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18168f50: reopening flushed file at 1732562754644 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36f77e8f: reopening flushed file at 1732562754653 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21508331: reopening flushed file at 1732562754663 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f76d079: reopening flushed file at 1732562754673 (+10 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=29, compaction requested=false at 1732562754684 (+11 ms)Writing region close event to WAL at 1732562754690 (+6 ms)Closed at 1732562754690 2024-11-25T19:25:54,691 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,691 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,691 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,691 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,691 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:25:54,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42843 is added to blk_1073741830_1006 (size=10311) 2024-11-25T19:25:54,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741830_1006 (size=10311) 2024-11-25T19:25:55,096 INFO [M:0;6ef6ccb75414:35063 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T19:25:55,096 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T19:25:55,096 INFO [M:0;6ef6ccb75414:35063 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35063 2024-11-25T19:25:55,096 INFO [M:0;6ef6ccb75414:35063 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:25:55,198 INFO [M:0;6ef6ccb75414:35063 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:25:55,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:55,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35063-0x1007858f36c0000, quorum=127.0.0.1:53135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:25:55,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@824b6ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:55,201 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:25:55,201 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:25:55,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:25:55,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir/,STOPPED} 2024-11-25T19:25:55,202 WARN [BP-234184799-172.17.0.2-1732562752678 heartbeating to localhost/127.0.0.1:33701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:25:55,202 WARN [BP-234184799-172.17.0.2-1732562752678 heartbeating to localhost/127.0.0.1:33701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-234184799-172.17.0.2-1732562752678 (Datanode Uuid 16bf159f-3f22-4c2d-a534-7a26af8a5f1e) service to localhost/127.0.0.1:33701 2024-11-25T19:25:55,202 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:25:55,203 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:25:55,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data3/current/BP-234184799-172.17.0.2-1732562752678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:55,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data4/current/BP-234184799-172.17.0.2-1732562752678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:55,204 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:25:55,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d4bdc00{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:55,206 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:25:55,206 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:25:55,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:25:55,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir/,STOPPED} 2024-11-25T19:25:55,208 WARN [BP-234184799-172.17.0.2-1732562752678 heartbeating to localhost/127.0.0.1:33701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:25:55,208 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:25:55,208 WARN [BP-234184799-172.17.0.2-1732562752678 heartbeating to localhost/127.0.0.1:33701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-234184799-172.17.0.2-1732562752678 (Datanode Uuid ceb72cc6-2142-4acb-8f9a-3958fef348c6) service to localhost/127.0.0.1:33701 2024-11-25T19:25:55,208 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:25:55,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data1/current/BP-234184799-172.17.0.2-1732562752678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:55,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/cluster_80d7549f-7c45-a330-cc4f-ba992e4972cc/data/data2/current/BP-234184799-172.17.0.2-1732562752678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:25:55,209 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:25:55,216 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@511dc70f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:25:55,217 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:25:55,217 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:25:55,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:25:55,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir/,STOPPED} 2024-11-25T19:25:55,225 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T19:25:55,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T19:25:55,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T19:25:55,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.log.dir so I do NOT create it in target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044 2024-11-25T19:25:55,243 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6604370a-1bcc-5408-8f66-3f009a75ef02/hadoop.tmp.dir so I do NOT create it in target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044 2024-11-25T19:25:55,243 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd, deleteOnExit=true 2024-11-25T19:25:55,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T19:25:55,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/test.cache.data in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T19:25:55,244 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T19:25:55,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/nfs.dump.dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T19:25:55,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T19:25:55,264 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:25:55,332 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:55,340 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:25:55,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:25:55,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:25:55,343 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:25:55,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:55,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:25:55,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:25:55,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:25:55,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:25:55,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T19:25:55,387 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-25T19:25:55,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cd2a640{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir/jetty-localhost-46773-hadoop-hdfs-3_4_1-tests_jar-_-any-11955857469855019029/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:25:55,471 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:46773} 2024-11-25T19:25:55,471 INFO [Time-limited test {}] server.Server(415): Started @106059ms 2024-11-25T19:25:55,488 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:25:55,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:55,568 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:25:55,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:25:55,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:25:55,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:25:55,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:25:55,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:25:55,610 INFO [regionserver/6ef6ccb75414:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:25:55,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ab5393f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir/jetty-localhost-34219-hadoop-hdfs-3_4_1-tests_jar-_-any-2653247842758034679/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:55,675 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:34219} 2024-11-25T19:25:55,675 INFO [Time-limited test {}] server.Server(415): Started @106263ms 2024-11-25T19:25:55,677 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:25:55,724 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:25:55,728 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:25:55,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:25:55,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:25:55,731 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:25:55,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:25:55,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:25:55,764 WARN [Thread-654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data1/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:55,764 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data2/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:55,807 WARN [Thread-633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:25:55,810 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf21fdbfad8247af2 with lease ID 0xcc28a8293c448955: Processing first storage report for DS-781665b5-4509-4c5d-b6a1-617bec384944 from datanode DatanodeRegistration(127.0.0.1:46233, datanodeUuid=4428cab8-534a-4ff9-a331-5e2fc125ec45, infoPort=33179, infoSecurePort=0, ipcPort=32821, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:25:55,811 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf21fdbfad8247af2 with lease ID 0xcc28a8293c448955: from storage DS-781665b5-4509-4c5d-b6a1-617bec384944 node DatanodeRegistration(127.0.0.1:46233, datanodeUuid=4428cab8-534a-4ff9-a331-5e2fc125ec45, infoPort=33179, infoSecurePort=0, ipcPort=32821, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:25:55,811 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf21fdbfad8247af2 with lease ID 0xcc28a8293c448955: Processing first storage report for DS-14e0c2f3-f3c9-43c7-804f-cc1cd9bdb822 from datanode DatanodeRegistration(127.0.0.1:46233, datanodeUuid=4428cab8-534a-4ff9-a331-5e2fc125ec45, infoPort=33179, infoSecurePort=0, ipcPort=32821, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:25:55,811 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf21fdbfad8247af2 with lease ID 0xcc28a8293c448955: from storage DS-14e0c2f3-f3c9-43c7-804f-cc1cd9bdb822 node DatanodeRegistration(127.0.0.1:46233, datanodeUuid=4428cab8-534a-4ff9-a331-5e2fc125ec45, infoPort=33179, infoSecurePort=0, ipcPort=32821, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:25:55,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21d5e4af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir/jetty-localhost-33217-hadoop-hdfs-3_4_1-tests_jar-_-any-12216675523795883245/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:25:55,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:33217} 2024-11-25T19:25:55,835 INFO [Time-limited test {}] server.Server(415): Started @106424ms 2024-11-25T19:25:55,837 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T19:25:55,916 WARN [Thread-680 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data3/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:55,917 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data4/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:25:55,945 WARN [Thread-669 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:25:55,948 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8d1e43307eeae4f with lease ID 0xcc28a8293c448956: Processing first storage report for DS-6f27d670-4d3c-4429-8270-08141043ae32 from datanode DatanodeRegistration(127.0.0.1:36773, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=41591, infoSecurePort=0, ipcPort=44875, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:25:55,948 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8d1e43307eeae4f with lease ID 0xcc28a8293c448956: from storage DS-6f27d670-4d3c-4429-8270-08141043ae32 node DatanodeRegistration(127.0.0.1:36773, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=41591, infoSecurePort=0, ipcPort=44875, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:25:55,948 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8d1e43307eeae4f with lease ID 0xcc28a8293c448956: Processing first storage report for DS-cbd42eba-4bfa-41a8-b588-898defd70479 from datanode DatanodeRegistration(127.0.0.1:36773, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=41591, infoSecurePort=0, ipcPort=44875, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:25:55,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8d1e43307eeae4f with lease ID 0xcc28a8293c448956: from storage DS-cbd42eba-4bfa-41a8-b588-898defd70479 node DatanodeRegistration(127.0.0.1:36773, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=41591, infoSecurePort=0, ipcPort=44875, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:25:55,978 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044 2024-11-25T19:25:55,981 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/zookeeper_0, clientPort=56329, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T19:25:55,982 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56329 2024-11-25T19:25:55,982 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:55,984 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:55,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:25:55,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:25:55,999 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555 with version=8 2024-11-25T19:25:55,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase-staging 2024-11-25T19:25:56,002 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:25:56,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:56,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:56,002 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:25:56,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:56,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:25:56,002 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T19:25:56,002 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:25:56,003 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40353 2024-11-25T19:25:56,004 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40353 connecting to ZooKeeper ensemble=127.0.0.1:56329 2024-11-25T19:25:56,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:403530x0, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:25:56,008 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40353-0x1007858fdfb0000 connected 2024-11-25T19:25:56,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:56,021 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:56,024 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:56,024 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555, hbase.cluster.distributed=false 2024-11-25T19:25:56,026 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:25:56,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40353 2024-11-25T19:25:56,030 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40353 2024-11-25T19:25:56,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40353 2024-11-25T19:25:56,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40353 2024-11-25T19:25:56,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40353 2024-11-25T19:25:56,053 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:25:56,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:56,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:56,053 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:25:56,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:56,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:25:56,053 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:25:56,054 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:25:56,054 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38981 2024-11-25T19:25:56,057 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38981 connecting to ZooKeeper ensemble=127.0.0.1:56329 2024-11-25T19:25:56,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:56,061 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:56,069 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389810x0, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:25:56,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:389810x0, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:25:56,070 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:25:56,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38981-0x1007858fdfb0001 connected 2024-11-25T19:25:56,085 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:25:56,086 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T19:25:56,087 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:25:56,089 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38981 2024-11-25T19:25:56,100 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38981 2024-11-25T19:25:56,105 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38981 2024-11-25T19:25:56,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38981 2024-11-25T19:25:56,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38981 2024-11-25T19:25:56,120 
DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ef6ccb75414:40353 2024-11-25T19:25:56,121 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:56,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:56,122 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:56,123 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:56,124 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T19:25:56,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,124 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,124 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:25:56,125 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ef6ccb75414,40353,1732562756001 from backup master directory 2024-11-25T19:25:56,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:56,126 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:56,126 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T19:25:56,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:25:56,126 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:56,131 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/hbase.id] with ID: a138621a-782b-444c-82b6-9c93cbdbd3c6 2024-11-25T19:25:56,131 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/.tmp/hbase.id 2024-11-25T19:25:56,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:25:56,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:25:56,144 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/.tmp/hbase.id]:[hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/hbase.id] 2024-11-25T19:25:56,160 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:56,160 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T19:25:56,162 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-25T19:25:56,165 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:25:56,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:25:56,176 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:25:56,177 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T19:25:56,177 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:25:56,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:25:56,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:25:56,188 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store 2024-11-25T19:25:56,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:25:56,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:25:56,203 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:56,203 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:25:56,204 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:56,204 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:56,204 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:25:56,204 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:25:56,204 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T19:25:56,204 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562756203Disabling compacts and flushes for region at 1732562756203Disabling writes for close at 1732562756204 (+1 ms)Writing region close event to WAL at 1732562756204Closed at 1732562756204 2024-11-25T19:25:56,205 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/.initializing 2024-11-25T19:25:56,205 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:56,209 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C40353%2C1732562756001, suffix=, logDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001, archiveDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/oldWALs, maxLogs=10 2024-11-25T19:25:56,209 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C40353%2C1732562756001.1732562756209 2024-11-25T19:25:56,220 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 2024-11-25T19:25:56,224 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33179:33179),(127.0.0.1/127.0.0.1:41591:41591)] 2024-11-25T19:25:56,225 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:25:56,225 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:56,225 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,225 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T19:25:56,230 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:56,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,232 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T19:25:56,232 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,232 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:25:56,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T19:25:56,235 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:25:56,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,237 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T19:25:56,237 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,237 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:25:56,238 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,238 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,239 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,240 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,240 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,241 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T19:25:56,242 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:25:56,245 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:25:56,245 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=823220, jitterRate=0.04677930474281311}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T19:25:56,246 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732562756225Initializing all the Stores at 1732562756226 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562756226Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562756228 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562756228Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562756228Cleaning up temporary data from old regions at 1732562756240 (+12 ms)Region opened successfully at 1732562756246 (+6 ms) 2024-11-25T19:25:56,247 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T19:25:56,250 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@357dc7f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:25:56,251 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T19:25:56,252 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T19:25:56,252 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T19:25:56,252 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T19:25:56,253 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T19:25:56,253 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T19:25:56,253 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T19:25:56,256 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T19:25:56,257 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T19:25:56,258 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T19:25:56,258 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T19:25:56,259 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T19:25:56,259 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T19:25:56,260 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T19:25:56,261 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T19:25:56,261 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T19:25:56,262 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T19:25:56,263 DEBUG 
[master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T19:25:56,265 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T19:25:56,266 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T19:25:56,267 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:56,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:25:56,267 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,268 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ef6ccb75414,40353,1732562756001, sessionid=0x1007858fdfb0000, setting cluster-up flag (Was=false) 2024-11-25T19:25:56,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,269 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,272 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T19:25:56,273 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:56,276 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,279 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T19:25:56,280 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:56,282 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T19:25:56,284 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:56,284 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T19:25:56,284 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T19:25:56,284 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ef6ccb75414,40353,1732562756001 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T19:25:56,286 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:56,286 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:56,286 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:56,286 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:25:56,286 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ef6ccb75414:0, corePoolSize=10, maxPoolSize=10 2024-11-25T19:25:56,287 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,287 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:25:56,287 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T19:25:56,288 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:56,289 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T19:25:56,290 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,290 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T19:25:56,292 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732562786292 2024-11-25T19:25:56,293 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T19:25:56,293 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T19:25:56,293 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T19:25:56,293 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T19:25:56,293 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T19:25:56,293 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T19:25:56,301 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,305 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T19:25:56,305 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T19:25:56,306 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T19:25:56,306 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T19:25:56,306 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T19:25:56,309 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562756306,5,FailOnTimeoutGroup] 2024-11-25T19:25:56,309 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562756309,5,FailOnTimeoutGroup] 2024-11-25T19:25:56,309 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,310 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T19:25:56,310 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,310 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
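The cleaner-chore entries above note that "Reopening regions with very high storeFileRefCount is disabled" unless hbase.regions.recovery.store.file.ref.count is set above 0. A minimal illustrative sketch (not part of the logged test) of supplying that threshold through the standard client Configuration follows; the property name is copied verbatim from the log line, and the value 3 is an arbitrary example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountConfig {
      public static Configuration withRefCountThreshold() {
        Configuration conf = HBaseConfiguration.create();
        // A value > 0 enables the master chore that reopens regions whose store
        // files are held open by an unexpectedly high number of readers.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
      }
    }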
2024-11-25T19:25:56,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:25:56,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:25:56,318 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(746): ClusterId : a138621a-782b-444c-82b6-9c93cbdbd3c6 2024-11-25T19:25:56,318 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:25:56,321 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:25:56,321 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:25:56,324 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:25:56,325 DEBUG [RS:0;6ef6ccb75414:38981 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f3cfde0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:25:56,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:25:56,340 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ef6ccb75414:38981 2024-11-25T19:25:56,340 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:25:56,340 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:25:56,340 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T19:25:56,342 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,40353,1732562756001 with port=38981, startcode=1732562756052 2024-11-25T19:25:56,342 DEBUG [RS:0;6ef6ccb75414:38981 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:25:56,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:25:56,353 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42781, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:25:56,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40353 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:56,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40353 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:56,359 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555 2024-11-25T19:25:56,359 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40559 2024-11-25T19:25:56,359 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:25:56,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:25:56,362 DEBUG [RS:0;6ef6ccb75414:38981 {}] zookeeper.ZKUtil(111): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:56,362 WARN [RS:0;6ef6ccb75414:38981 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:25:56,362 INFO [RS:0;6ef6ccb75414:38981 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:25:56,362 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:56,364 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,38981,1732562756052] 2024-11-25T19:25:56,368 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:25:56,373 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:25:56,381 INFO [RS:0;6ef6ccb75414:38981 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:25:56,381 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
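The reportForDuty / registration sequence above is what a single-master, single-regionserver mini cluster produces during an HBase unit test. As a hedged sketch only, the snippet below shows how such a cluster is typically brought up; it assumes the HBaseTestingUtility API as it exists in HBase 2.x, and the 3.0.0-beta-2-SNAPSHOT build in this log may expose a differently named utility class.

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Starts an in-process ZooKeeper quorum, HDFS mini cluster, one HMaster and
        // one HRegionServer, matching the "reportForDuty to master=..." entries above.
        util.startMiniCluster(1);
        try {
          // ... test body would go here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }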
2024-11-25T19:25:56,382 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:25:56,383 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T19:25:56,383 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,383 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,383 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,383 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:25:56,384 DEBUG [RS:0;6ef6ccb75414:38981 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:25:56,386 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T19:25:56,386 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,386 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,387 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,387 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,387 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38981,1732562756052-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:25:56,410 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:25:56,410 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38981,1732562756052-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,410 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,410 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.Replication(171): 6ef6ccb75414,38981,1732562756052 started 2024-11-25T19:25:56,430 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:56,431 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,38981,1732562756052, RpcServer on 6ef6ccb75414/172.17.0.2:38981, sessionid=0x1007858fdfb0001 2024-11-25T19:25:56,431 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:25:56,431 DEBUG [RS:0;6ef6ccb75414:38981 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:56,431 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,38981,1732562756052' 2024-11-25T19:25:56,431 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:25:56,432 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:25:56,432 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:25:56,432 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:25:56,432 DEBUG [RS:0;6ef6ccb75414:38981 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:56,432 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,38981,1732562756052' 2024-11-25T19:25:56,432 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:25:56,433 DEBUG 
[RS:0;6ef6ccb75414:38981 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:25:56,433 DEBUG [RS:0;6ef6ccb75414:38981 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:25:56,433 INFO [RS:0;6ef6ccb75414:38981 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:25:56,433 INFO [RS:0;6ef6ccb75414:38981 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T19:25:56,536 INFO [RS:0;6ef6ccb75414:38981 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C38981%2C1732562756052, suffix=, logDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052, archiveDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs, maxLogs=32 2024-11-25T19:25:56,537 INFO [RS:0;6ef6ccb75414:38981 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38981%2C1732562756052.1732562756536 2024-11-25T19:25:56,554 INFO [RS:0;6ef6ccb75414:38981 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 2024-11-25T19:25:56,570 DEBUG [RS:0;6ef6ccb75414:38981 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41591:41591),(127.0.0.1/127.0.0.1:33179:33179)] 2024-11-25T19:25:56,717 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T19:25:56,717 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555 2024-11-25T19:25:56,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741833_1009 (size=32) 2024-11-25T19:25:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741833_1009 (size=32) 2024-11-25T19:25:56,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:56,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:25:56,729 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:25:56,729 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:56,730 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:25:56,731 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:25:56,732 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,732 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:56,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:25:56,734 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:25:56,734 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:56,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:25:56,736 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:25:56,737 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:56,737 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:56,737 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:25:56,738 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740 2024-11-25T19:25:56,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740 2024-11-25T19:25:56,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:25:56,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:25:56,741 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T19:25:56,743 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:25:56,745 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:25:56,746 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809065, jitterRate=0.02878059446811676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:25:56,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732562756726Initializing all the Stores at 1732562756727 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562756727Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562756727Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562756727Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562756727Cleaning up temporary data from old regions at 1732562756740 (+13 ms)Region opened successfully at 1732562756747 (+7 ms) 2024-11-25T19:25:56,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:25:56,747 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-25T19:25:56,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:25:56,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-11-25T19:25:56,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:25:56,748 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:25:56,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562756747Disabling compacts and flushes for region at 1732562756747Disabling writes for close at 1732562756748 (+1 ms)Writing region close event to WAL at 1732562756748Closed at 1732562756748 2024-11-25T19:25:56,750 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:56,750 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T19:25:56,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T19:25:56,752 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:25:56,753 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T19:25:56,863 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:25:56,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:25:56,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:25:56,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:25:56,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:25:56,904 DEBUG [6ef6ccb75414:40353 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T19:25:56,904 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:56,906 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,38981,1732562756052, state=OPENING 2024-11-25T19:25:56,907 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T19:25:56,908 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:25:56,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:56,909 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:25:56,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,38981,1732562756052}] 2024-11-25T19:25:56,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:57,063 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T19:25:57,067 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34573, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T19:25:57,072 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T19:25:57,072 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:25:57,074 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C38981%2C1732562756052.meta, suffix=.meta, logDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052, archiveDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs, maxLogs=32 2024-11-25T19:25:57,075 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 
6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta 2024-11-25T19:25:57,086 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta 2024-11-25T19:25:57,087 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33179:33179),(127.0.0.1/127.0.0.1:41591:41591)] 2024-11-25T19:25:57,088 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:25:57,089 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T19:25:57,089 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T19:25:57,089 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-25T19:25:57,089 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T19:25:57,089 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:57,089 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T19:25:57,089 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T19:25:57,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:25:57,095 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
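The open sequence above instantiates the hbase:meta stores for the column families listed in the table descriptor (info, ns, rep_barrier, table). As an illustration only, the following sketch reads that descriptor back through the public Admin API; the connection settings are assumptions, but the calls used (ConnectionFactory.createConnection, Admin.getDescriptor, TableName.META_TABLE_NAME) are standard HBase client API.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Prints the descriptor whose families (info, ns, rep_barrier, table)
          // appear in the FSTableDescriptors / HRegion entries above.
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          System.out.println(meta);
        }
      }
    }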
2024-11-25T19:25:57,095 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:57,096 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:57,096 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:25:57,097 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:25:57,097 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:57,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:57,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:25:57,098 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:25:57,098 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:57,099 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:57,099 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:25:57,100 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:25:57,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:57,101 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:25:57,101 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:25:57,102 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740 2024-11-25T19:25:57,104 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740 2024-11-25T19:25:57,105 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:25:57,105 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:25:57,106 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
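The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, so the region falls back to memStoreFlushHeapSize divided by the number of families. A hedged sketch of setting that bound on a user table descriptor follows; the property name is copied from the log line, while the table name, family name, and 32 MB value are made up for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushPolicySketch {
      static TableDescriptor descriptorWithFlushLowerBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            // Per-column-family flush lower bound in bytes; 32 MB purely as an example.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(32L * 1024 * 1024))
            .build();
      }
    }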
2024-11-25T19:25:57,108 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:25:57,109 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820065, jitterRate=0.04276685416698456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:25:57,109 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T19:25:57,111 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732562757089Writing region info on filesystem at 1732562757089Initializing all the Stores at 1732562757091 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562757091Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562757093 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562757093Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562757093Cleaning up temporary data from old regions at 1732562757105 (+12 ms)Running coprocessor post-open hooks at 1732562757109 (+4 ms)Region opened successfully at 1732562757110 (+1 ms) 2024-11-25T19:25:57,112 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732562757063 2024-11-25T19:25:57,116 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T19:25:57,116 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T19:25:57,117 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:57,119 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,38981,1732562756052, state=OPEN 2024-11-25T19:25:57,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:25:57,121 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:25:57,121 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:57,121 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:57,122 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:25:57,125 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T19:25:57,125 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,38981,1732562756052 in 212 msec 2024-11-25T19:25:57,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T19:25:57,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 375 msec 2024-11-25T19:25:57,130 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:25:57,130 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T19:25:57,132 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:25:57,132 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,38981,1732562756052, seqNum=-1] 2024-11-25T19:25:57,132 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:25:57,134 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43975, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:25:57,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 857 msec 2024-11-25T19:25:57,142 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732562757141, completionTime=-1 2024-11-25T19:25:57,142 INFO 
[master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T19:25:57,142 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T19:25:57,144 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T19:25:57,144 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732562817144 2024-11-25T19:25:57,144 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732562877144 2024-11-25T19:25:57,145 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T19:25:57,145 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,40353,1732562756001-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,145 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,40353,1732562756001-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,145 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,40353,1732562756001-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,145 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ef6ccb75414:40353, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,146 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,146 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,148 DEBUG [master/6ef6ccb75414:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.024sec 2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,40353,1732562756001-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:25:57,151 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,40353,1732562756001-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T19:25:57,154 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T19:25:57,154 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T19:25:57,155 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,40353,1732562756001-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,219 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24c93faa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:25:57,219 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ef6ccb75414,40353,-1 for getting cluster id 2024-11-25T19:25:57,219 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T19:25:57,221 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a138621a-782b-444c-82b6-9c93cbdbd3c6' 2024-11-25T19:25:57,221 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T19:25:57,221 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a138621a-782b-444c-82b6-9c93cbdbd3c6" 2024-11-25T19:25:57,221 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11de8157, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:25:57,222 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ef6ccb75414,40353,-1] 2024-11-25T19:25:57,222 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T19:25:57,222 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:25:57,225 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58828, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T19:25:57,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25f1a17d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:25:57,227 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:25:57,228 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,38981,1732562756052, seqNum=-1] 2024-11-25T19:25:57,229 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:25:57,231 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58242, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:25:57,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:57,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:57,238 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T19:25:57,286 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:25:57,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:57,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:57,287 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:25:57,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:25:57,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:25:57,287 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:25:57,287 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:25:57,288 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37563 2024-11-25T19:25:57,290 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37563 connecting to ZooKeeper ensemble=127.0.0.1:56329 2024-11-25T19:25:57,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:57,295 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:25:57,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375630x0, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:25:57,301 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:375630x0, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-25T19:25:57,301 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-25T19:25:57,302 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:25:57,305 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:25:57,307 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:375630x0, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:25:57,308 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:375630x0, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:25:57,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37563 2024-11-25T19:25:57,312 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37563-0x1007858fdfb0002 connected 2024-11-25T19:25:57,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37563 2024-11-25T19:25:57,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37563 2024-11-25T19:25:57,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37563 2024-11-25T19:25:57,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37563 2024-11-25T19:25:57,316 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(746): ClusterId : a138621a-782b-444c-82b6-9c93cbdbd3c6 2024-11-25T19:25:57,316 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:25:57,318 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:25:57,318 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:25:57,319 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:25:57,320 DEBUG [RS:1;6ef6ccb75414:37563 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f6fcd20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:25:57,332 DEBUG [RS:1;6ef6ccb75414:37563 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: 
Shutdownhook:RS:1;6ef6ccb75414:37563 2024-11-25T19:25:57,332 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:25:57,332 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:25:57,332 DEBUG [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T19:25:57,333 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,40353,1732562756001 with port=37563, startcode=1732562757286 2024-11-25T19:25:57,333 DEBUG [RS:1;6ef6ccb75414:37563 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:25:57,335 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50819, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:25:57,335 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40353 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,37563,1732562757286 2024-11-25T19:25:57,335 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40353 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,37563,1732562757286 2024-11-25T19:25:57,337 DEBUG [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555 2024-11-25T19:25:57,337 DEBUG [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40559 2024-11-25T19:25:57,337 DEBUG [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:25:57,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:25:57,339 DEBUG [RS:1;6ef6ccb75414:37563 {}] zookeeper.ZKUtil(111): regionserver:37563-0x1007858fdfb0002, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,37563,1732562757286 2024-11-25T19:25:57,339 WARN [RS:1;6ef6ccb75414:37563 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T19:25:57,339 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,37563,1732562757286] 2024-11-25T19:25:57,339 INFO [RS:1;6ef6ccb75414:37563 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:25:57,339 DEBUG [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286 2024-11-25T19:25:57,344 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:25:57,346 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:25:57,346 INFO [RS:1;6ef6ccb75414:37563 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:25:57,347 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,347 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:25:57,348 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T19:25:57,348 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,348 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:25:57,349 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:25:57,349 DEBUG [RS:1;6ef6ccb75414:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:25:57,352 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,352 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,352 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,353 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,353 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,353 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,37563,1732562757286-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:25:57,366 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:25:57,366 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,37563,1732562757286-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,366 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:25:57,366 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.Replication(171): 6ef6ccb75414,37563,1732562757286 started 2024-11-25T19:25:57,379 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:25:57,379 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,37563,1732562757286, RpcServer on 6ef6ccb75414/172.17.0.2:37563, sessionid=0x1007858fdfb0002 2024-11-25T19:25:57,379 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:25:57,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;6ef6ccb75414:37563,5,FailOnTimeoutGroup] 2024-11-25T19:25:57,379 DEBUG [RS:1;6ef6ccb75414:37563 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,37563,1732562757286 2024-11-25T19:25:57,379 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,37563,1732562757286' 2024-11-25T19:25:57,379 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:25:57,379 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-25T19:25:57,380 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T19:25:57,380 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:25:57,380 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:25:57,380 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:25:57,380 DEBUG [RS:1;6ef6ccb75414:37563 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,37563,1732562757286 2024-11-25T19:25:57,380 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,37563,1732562757286' 2024-11-25T19:25:57,380 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:25:57,380 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:25:57,381 DEBUG [RS:1;6ef6ccb75414:37563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:25:57,381 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 6ef6ccb75414,40353,1732562756001 2024-11-25T19:25:57,381 INFO [RS:1;6ef6ccb75414:37563 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:25:57,381 INFO [RS:1;6ef6ccb75414:37563 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-25T19:25:57,381 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7a922ed3 2024-11-25T19:25:57,381 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T19:25:57,383 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T19:25:57,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40353 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T19:25:57,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40353 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-25T19:25:57,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40353 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:25:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40353 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T19:25:57,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T19:25:57,387 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:57,387 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40353 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-25T19:25:57,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:25:57,388 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T19:25:57,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741835_1011 (size=393) 2024-11-25T19:25:57,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741835_1011 (size=393) 2024-11-25T19:25:57,398 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] 
regionserver.HRegion(7572): creating {ENCODED => 5500a35790ae594e549e5c2f71f85faa, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555 2024-11-25T19:25:57,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36773 is added to blk_1073741836_1012 (size=76) 2024-11-25T19:25:57,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46233 is added to blk_1073741836_1012 (size=76) 2024-11-25T19:25:57,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:57,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 5500a35790ae594e549e5c2f71f85faa, disabling compactions & flushes 2024-11-25T19:25:57,411 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:25:57,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:25:57,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. after waiting 0 ms 2024-11-25T19:25:57,412 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:25:57,412 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 
2024-11-25T19:25:57,412 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5500a35790ae594e549e5c2f71f85faa: Waiting for close lock at 1732562757411Disabling compacts and flushes for region at 1732562757411Disabling writes for close at 1732562757411Writing region close event to WAL at 1732562757412 (+1 ms)Closed at 1732562757412 2024-11-25T19:25:57,413 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T19:25:57,414 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732562757413"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732562757413"}]},"ts":"1732562757413"} 2024-11-25T19:25:57,417 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-25T19:25:57,418 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T19:25:57,419 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562757418"}]},"ts":"1732562757418"} 2024-11-25T19:25:57,421 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-25T19:25:57,422 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5500a35790ae594e549e5c2f71f85faa, ASSIGN}] 2024-11-25T19:25:57,423 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5500a35790ae594e549e5c2f71f85faa, ASSIGN 2024-11-25T19:25:57,425 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5500a35790ae594e549e5c2f71f85faa, ASSIGN; state=OFFLINE, location=6ef6ccb75414,38981,1732562756052; forceNewPlan=false, retain=false 2024-11-25T19:25:57,483 INFO [RS:1;6ef6ccb75414:37563 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C37563%2C1732562757286, suffix=, logDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286, archiveDir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs, maxLogs=32 2024-11-25T19:25:57,484 INFO [RS:1;6ef6ccb75414:37563 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C37563%2C1732562757286.1732562757484 2024-11-25T19:25:57,495 INFO [RS:1;6ef6ccb75414:37563 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 2024-11-25T19:25:57,499 DEBUG [RS:1;6ef6ccb75414:37563 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41591:41591),(127.0.0.1/127.0.0.1:33179:33179)] 2024-11-25T19:25:57,575 INFO [6ef6ccb75414:40353 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-25T19:25:57,576 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5500a35790ae594e549e5c2f71f85faa, regionState=OPENING, regionLocation=6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:57,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5500a35790ae594e549e5c2f71f85faa, ASSIGN because future has completed 2024-11-25T19:25:57,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5500a35790ae594e549e5c2f71f85faa, server=6ef6ccb75414,38981,1732562756052}] 2024-11-25T19:25:57,735 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:25:57,736 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5500a35790ae594e549e5c2f71f85faa, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:25:57,737 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,737 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:25:57,737 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,737 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,739 INFO [StoreOpener-5500a35790ae594e549e5c2f71f85faa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,741 INFO [StoreOpener-5500a35790ae594e549e5c2f71f85faa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5500a35790ae594e549e5c2f71f85faa columnFamilyName info 2024-11-25T19:25:57,741 DEBUG [StoreOpener-5500a35790ae594e549e5c2f71f85faa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:25:57,741 INFO [StoreOpener-5500a35790ae594e549e5c2f71f85faa-1 {}] regionserver.HStore(327): Store=5500a35790ae594e549e5c2f71f85faa/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:25:57,742 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,743 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,743 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,744 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,744 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,746 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,749 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:25:57,750 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5500a35790ae594e549e5c2f71f85faa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733436, jitterRate=-0.06738823652267456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T19:25:57,750 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, 
pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:25:57,751 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5500a35790ae594e549e5c2f71f85faa: Running coprocessor pre-open hook at 1732562757737Writing region info on filesystem at 1732562757737Initializing all the Stores at 1732562757738 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562757738Cleaning up temporary data from old regions at 1732562757744 (+6 ms)Running coprocessor post-open hooks at 1732562757750 (+6 ms)Region opened successfully at 1732562757751 (+1 ms) 2024-11-25T19:25:57,752 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa., pid=6, masterSystemTime=1732562757731 2024-11-25T19:25:57,755 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:25:57,755 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 
2024-11-25T19:25:57,756 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5500a35790ae594e549e5c2f71f85faa, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,38981,1732562756052 2024-11-25T19:25:57,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5500a35790ae594e549e5c2f71f85faa, server=6ef6ccb75414,38981,1732562756052 because future has completed 2024-11-25T19:25:57,766 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T19:25:57,766 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5500a35790ae594e549e5c2f71f85faa, server=6ef6ccb75414,38981,1732562756052 in 182 msec 2024-11-25T19:25:57,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T19:25:57,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5500a35790ae594e549e5c2f71f85faa, ASSIGN in 344 msec 2024-11-25T19:25:57,773 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T19:25:57,773 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562757773"}]},"ts":"1732562757773"} 2024-11-25T19:25:57,776 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-25T19:25:57,778 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T19:25:57,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 394 msec 2024-11-25T19:26:02,595 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:26:02,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:02,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:02,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:02,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:02,627 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-25T19:26:05,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:26:05,385 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T19:26:05,388 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T19:26:05,388 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-25T19:26:05,389 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:26:05,389 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T19:26:05,389 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T19:26:05,390 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-25T19:26:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:26:07,458 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-25T19:26:07,458 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-25T19:26:07,461 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T19:26:07,461 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:26:07,474 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:07,477 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:07,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:07,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:07,478 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:26:07,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2046b984{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:07,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fd42be2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:07,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1744a862{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir/jetty-localhost-36291-hadoop-hdfs-3_4_1-tests_jar-_-any-6926906219057644630/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:07,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2047cbbb{HTTP/1.1, (http/1.1)}{localhost:36291} 2024-11-25T19:26:07,573 INFO [Time-limited test {}] server.Server(415): Started @118161ms 2024-11-25T19:26:07,574 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:07,604 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:07,607 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:07,608 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:07,608 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:07,608 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:26:07,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c1aeedb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:07,609 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74c44b7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:07,632 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data6/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:07,632 WARN [Thread-826 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data5/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:07,652 WARN [Thread-806 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:26:07,655 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb8e68a7b81a535e with lease ID 0xcc28a8293c448957: Processing first storage report for DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31 from datanode DatanodeRegistration(127.0.0.1:36373, datanodeUuid=c54d2576-6559-4443-8e35-59f13983eaeb, infoPort=36059, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:26:07,655 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb8e68a7b81a535e with lease ID 0xcc28a8293c448957: from storage DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31 node DatanodeRegistration(127.0.0.1:36373, datanodeUuid=c54d2576-6559-4443-8e35-59f13983eaeb, infoPort=36059, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:07,655 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb8e68a7b81a535e with lease ID 0xcc28a8293c448957: Processing first storage report for DS-58bb4fb4-918c-4fb4-8f4d-d27405007bfd from datanode DatanodeRegistration(127.0.0.1:36373, datanodeUuid=c54d2576-6559-4443-8e35-59f13983eaeb, infoPort=36059, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:26:07,655 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb8e68a7b81a535e with lease ID 0xcc28a8293c448957: from storage DS-58bb4fb4-918c-4fb4-8f4d-d27405007bfd node DatanodeRegistration(127.0.0.1:36373, datanodeUuid=c54d2576-6559-4443-8e35-59f13983eaeb, infoPort=36059, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:07,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@337c5dd4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir/jetty-localhost-37583-hadoop-hdfs-3_4_1-tests_jar-_-any-15232721579761747038/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:07,712 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b5be5aa{HTTP/1.1, (http/1.1)}{localhost:37583} 2024-11-25T19:26:07,712 INFO [Time-limited test {}] server.Server(415): Started @118300ms 2024-11-25T19:26:07,713 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:07,747 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:07,751 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:07,752 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:07,752 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:07,752 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:26:07,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c4c627f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:07,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76c9fd0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:07,774 WARN [Thread-861 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:07,775 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:07,789 WARN [Thread-841 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:26:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe35ce821b36e27b6 with lease ID 0xcc28a8293c448958: Processing first storage report for DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7 from datanode DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:26:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe35ce821b36e27b6 with lease ID 0xcc28a8293c448958: from storage DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7 node DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe35ce821b36e27b6 with lease ID 0xcc28a8293c448958: Processing first storage report for DS-68170031-3335-4b38-bf2a-ea32cab70b1d from datanode DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:26:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe35ce821b36e27b6 with lease ID 0xcc28a8293c448958: from storage DS-68170031-3335-4b38-bf2a-ea32cab70b1d node DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:07,848 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54cdb957{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir/jetty-localhost-37109-hadoop-hdfs-3_4_1-tests_jar-_-any-14103686976327607815/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:07,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30add41a{HTTP/1.1, (http/1.1)}{localhost:37109} 2024-11-25T19:26:07,849 INFO [Time-limited test {}] server.Server(415): Started @118437ms 2024-11-25T19:26:07,850 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T19:26:07,908 WARN [Thread-887 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data9/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:07,908 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data10/current/BP-985132614-172.17.0.2-1732562755277/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:07,933 WARN [Thread-876 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:26:07,936 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x307c117a68fc6bf1 with lease ID 0xcc28a8293c448959: Processing first storage report for DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f from datanode DatanodeRegistration(127.0.0.1:34341, datanodeUuid=d647dfc5-4b93-4852-8177-a0ecf3b361e6, infoPort=46711, infoSecurePort=0, ipcPort=44657, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:26:07,936 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x307c117a68fc6bf1 with lease ID 0xcc28a8293c448959: from storage DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f node DatanodeRegistration(127.0.0.1:34341, datanodeUuid=d647dfc5-4b93-4852-8177-a0ecf3b361e6, infoPort=46711, infoSecurePort=0, ipcPort=44657, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:07,936 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x307c117a68fc6bf1 with lease ID 0xcc28a8293c448959: Processing first storage report for DS-548fe3c8-e27a-4129-99f3-4131e6c50c72 from datanode DatanodeRegistration(127.0.0.1:34341, datanodeUuid=d647dfc5-4b93-4852-8177-a0ecf3b361e6, infoPort=46711, infoSecurePort=0, ipcPort=44657, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277) 2024-11-25T19:26:07,936 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x307c117a68fc6bf1 with lease ID 0xcc28a8293c448959: from storage DS-548fe3c8-e27a-4129-99f3-4131e6c50c72 node DatanodeRegistration(127.0.0.1:34341, datanodeUuid=d647dfc5-4b93-4852-8177-a0ecf3b361e6, infoPort=46711, infoSecurePort=0, ipcPort=44657, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:07,968 WARN [ResponseProcessor for block BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:07,968 WARN [ResponseProcessor for block BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:07,969 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 block BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:07,969 WARN [ResponseProcessor for block BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:07,969 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 block BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:07,969 WARN [ResponseProcessor for block BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:07,969 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 block BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK], DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:07,969 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta block BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK], DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:07,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:41106 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:46233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41106 dst: /127.0.0.1:46233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,970 WARN [PacketResponder: BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36773] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-550289095_22 at /127.0.0.1:34850 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34850 dst: /127.0.0.1:36773 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-550289095_22 at /127.0.0.1:41144 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41144 dst: /127.0.0.1:46233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:34798 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:36773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34798 dst: /127.0.0.1:36773 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:34822 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34822 dst: /127.0.0.1:36773 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:07,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:41138 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41138 dst: /127.0.0.1:46233 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084990843_22 at /127.0.0.1:41078 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41078 dst: /127.0.0.1:46233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084990843_22 at /127.0.0.1:34774 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34774 dst: /127.0.0.1:36773 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:07,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21d5e4af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:07,978 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:07,979 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:07,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:07,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:07,980 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:07,980 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T19:26:07,980 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985132614-172.17.0.2-1732562755277 (Datanode Uuid 85f1322b-a253-4d11-a769-38483729e20a) service to localhost/127.0.0.1:40559 2024-11-25T19:26:07,980 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:07,981 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data3/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:07,981 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data4/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:07,981 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:07,982 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta block BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:07,982 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6fdb820b {}] datanode.DataXceiver(331): 127.0.0.1:46233:DataXceiver error processing unknown operation src: /127.0.0.1:43460 dst: /127.0.0.1:46233 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,983 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 block BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:07,984 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 block BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:07,985 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6fd20ca8 {}] datanode.DataXceiver(331): 127.0.0.1:46233:DataXceiver error processing unknown operation src: /127.0.0.1:43478 dst: /127.0.0.1:46233 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:07,985 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 block BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] 
at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:07,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ab5393f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:07,994 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:07,994 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:07,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:07,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:08,001 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:26:08,001 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:08,001 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:08,001 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985132614-172.17.0.2-1732562755277 (Datanode Uuid 4428cab8-534a-4ff9-a331-5e2fc125ec45) service to localhost/127.0.0.1:40559 2024-11-25T19:26:08,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data1/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:08,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data2/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:08,002 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:08,007 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa., hostname=6ef6ccb75414,38981,1732562756052, seqNum=2] 2024-11-25T19:26:08,009 ERROR [FSHLog-0-hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555-prefix:6ef6ccb75414,38981,1732562756052 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:08,009 WARN [FSHLog-0-hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555-prefix:6ef6ccb75414,38981,1732562756052 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:08,009 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:08,010 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C38981%2C1732562756052:(num 1732562756536) roll requested 2024-11-25T19:26:08,010 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38981%2C1732562756052.1732562768010 2024-11-25T19:26:08,023 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:08,023 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:08,023 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:08,023 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:08,023 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:08,023 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562768010 2024-11-25T19:26:08,024 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:08,024 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:08,024 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36059:36059),(127.0.0.1/127.0.0.1:46711:46711)] 2024-11-25T19:26:08,025 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 is not closed yet, will try archiving it next time 2024-11-25T19:26:08,025 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-25T19:26:08,025 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-25T19:26:08,025 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 2024-11-25T19:26:08,028 WARN [IPC Server handler 0 on default port 40559 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741832_1008 2024-11-25T19:26:08,031 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 after 4ms 2024-11-25T19:26:08,178 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:09,350 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:10,025 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:10,027 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562768010 2024-11-25T19:26:10,028 WARN [ResponseProcessor for block BP-985132614-172.17.0.2-1732562755277:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985132614-172.17.0.2-1732562755277:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:10,029 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562768010 block BP-985132614-172.17.0.2-1732562755277:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:10,030 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:33828 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33828 dst: /127.0.0.1:36373 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:10,031 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:55218 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55218 dst: /127.0.0.1:34341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:10,033 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1744a862{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:10,034 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2047cbbb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:10,034 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:10,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fd42be2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:10,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2046b984{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:10,036 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:10,036 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T19:26:10,036 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985132614-172.17.0.2-1732562755277 (Datanode Uuid c54d2576-6559-4443-8e35-59f13983eaeb) service to localhost/127.0.0.1:40559 2024-11-25T19:26:10,036 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:10,037 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data5/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:10,037 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data6/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:10,037 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:10,179 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:11,351 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:12,026 WARN [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]] 2024-11-25T19:26:12,026 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:12,026 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C38981%2C1732562756052:(num 1732562768010) roll requested 2024-11-25T19:26:12,026 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38981%2C1732562756052.1732562772026 2024-11-25T19:26:12,029 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:12,030 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:12,030 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741839_1021 2024-11-25T19:26:12,032 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 after 4007ms 2024-11-25T19:26:12,033 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:12,038 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36773 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:12,038 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57922 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741840_1022 to mirror 127.0.0.1:36773 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:12,038 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:12,038 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741840_1022 2024-11-25T19:26:12,038 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57922 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T19:26:12,038 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57922 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57922 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:12,039 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:12,042 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46233 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:12,042 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57938 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741841_1023 to mirror 127.0.0.1:46233 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:12,042 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:12,042 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741841_1023 2024-11-25T19:26:12,043 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57938 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
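The RecoverLeaseFSUtils entries above (attempt=0 after 4ms, then attempt=1 after 4007ms) show a retry loop around HDFS lease recovery on the old WAL file. A minimal sketch of that pattern follows, assuming the target FileSystem implements org.apache.hadoop.fs.LeaseRecoverable (the "set recoverLeaseMethod" line earlier indicates it does here); the class name, retry count, and pause are illustrative and are not HBase's actual RecoverLeaseFSUtils implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LeaseRecoverable;
import org.apache.hadoop.fs.Path;

public final class RecoverWalLeaseSketch {
  /** Returns true once the NameNode reports the file closed, false if the retries ran out. */
  static boolean recoverLeaseWithRetries(FileSystem fs, Path wal, int maxAttempts, long pauseMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof LeaseRecoverable)) {
      return true; // filesystems without leases (e.g. the local FS) need no recovery
    }
    LeaseRecoverable recoverable = (LeaseRecoverable) fs;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (recoverable.recoverLease(wal)) {
        return true; // lease released, file is closed
      }
      // A false return corresponds to the NameNode WARN above:
      // "Lease recovery is in progress. RecoveryId = ..."; back off and retry.
      Thread.sleep(pauseMs);
    }
    return false;
  }
}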
2024-11-25T19:26:12,043 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T19:26:12,043 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57938 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57938 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:12,043 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:12,047 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:12,048 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:12,048 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:12,048 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:12,048 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:12,048 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562768010 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562772026 2024-11-25T19:26:12,049 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33219:33219),(127.0.0.1/127.0.0.1:46711:46711)] 2024-11-25T19:26:12,049 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 is not closed yet, will try archiving it next time 2024-11-25T19:26:12,049 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562768010 is not closed yet, will try archiving it next time 2024-11-25T19:26:12,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34341 is added to blk_1073741838_1020 (size=3600) 2024-11-25T19:26:12,179 
INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:12,451 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 is not closed yet, will try archiving it next time 2024-11-25T19:26:13,351 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:13,949 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@75c76d19[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34341, datanodeUuid=d647dfc5-4b93-4852-8177-a0ecf3b361e6, infoPort=46711, infoSecurePort=0, ipcPort=44657, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741838_1020 to 127.0.0.1:36773 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:14,046 WARN [ResponseProcessor for block BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,047 WARN [PacketResponder: BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34341] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,047 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57950 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57950 dst: /127.0.0.1:34241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,048 WARN [DataStreamer for file /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562772026 block BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:14,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:33514 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33514 dst: /127.0.0.1:34341 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,049 WARN [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]] 2024-11-25T19:26:14,049 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,050 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C38981%2C1732562756052:(num 1732562772026) roll requested 2024-11-25T19:26:14,050 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38981%2C1732562756052.1732562774050 2024-11-25T19:26:14,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54cdb957{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:14,054 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30add41a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:14,054 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:14,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76c9fd0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:14,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c4c627f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:14,055 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:14,055 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
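The "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" entries are the WAL writer noticing that its output stream's pipeline has shrunk below the configured minimum and asking for a roll. A sketch of that check, assuming the underlying stream is an org.apache.hadoop.hdfs.client.HdfsDataOutputStream; the method name and threshold handling here are illustrative, not FSHLog's actual code.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

final class LowReplicationCheckSketch {
  /** True when the live pipeline has fewer replicas than required and the WAL should roll. */
  static boolean shouldRequestRoll(FSDataOutputStream walOut, int minReplicas) throws IOException {
    if (!(walOut instanceof HdfsDataOutputStream)) {
      return false; // no HDFS pipeline to inspect on other filesystems
    }
    int live = ((HdfsDataOutputStream) walOut).getCurrentBlockReplication();
    // In the entries above live == 1 while minReplicas == 2, so a roll is requested.
    return live < minReplicas;
  }
}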
2024-11-25T19:26:14,056 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985132614-172.17.0.2-1732562755277 (Datanode Uuid d647dfc5-4b93-4852-8177-a0ecf3b361e6) service to localhost/127.0.0.1:40559 2024-11-25T19:26:14,056 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:14,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data9/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:14,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data10/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:14,057 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38981 {}] regionserver.HRegion(8855): Flush requested on 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:26:14,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5500a35790ae594e549e5c2f71f85faa 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:26:14,081 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,081 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 
2024-11-25T19:26:14,081 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741843_1026 2024-11-25T19:26:14,082 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:14,085 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,085 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:14,085 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741844_1027 2024-11-25T19:26:14,086 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:14,088 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:14,089 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:14,089 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741845_1028 2024-11-25T19:26:14,090 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:14,093 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,093 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 
2024-11-25T19:26:14,093 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741846_1029 2024-11-25T19:26:14,094 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:14,095 WARN [IPC Server handler 0 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T19:26:14,095 WARN [IPC Server handler 0 on default port 40559 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T19:26:14,096 WARN [IPC Server handler 0 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T19:26:14,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/75e87558d04342a7830dbd27b558ed86 is 1080, key is row0002/info:/1732562770039/Put/seqid=0 2024-11-25T19:26:14,100 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
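The "Failed to place enough replicas, still in need of 1 to reach 2" warnings mean the NameNode could not find two live, writable datanodes for a new block while the flush for TestLogRolling-testLogRollOnDatanodeDeath was in progress. That situation can be reproduced in isolation along these lines; MiniDFSCluster is Hadoop's test harness, and the path, write sizes, and number of datanodes stopped are assumptions for illustration, not what this test run actually did.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("dfs.replication", 2); // same minimum the warnings above refer to
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FSDataOutputStream out = cluster.getFileSystem().create(new Path("/repro/wal-like-file"));
      out.write(new byte[4096]);
      out.hflush(); // pins a two-datanode write pipeline
      cluster.stopDataNode(0); // simulate a datanode dying mid-write
      cluster.stopDataNode(0); // stopping a second one leaves only 1 live node for replication=2
      out.write(new byte[4096]);
      out.hflush(); // expect pipeline recovery and block placement warnings like the ones above
    } finally {
      cluster.shutdown();
    }
  }
}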
2024-11-25T19:26:14,100 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:14,100 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741848_1031 2024-11-25T19:26:14,102 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:14,102 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:14,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:14,103 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:14,103 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:14,103 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:14,103 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562772026 with entries=11, filesize=11.81 KB; new WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562774050 2024-11-25T19:26:14,104 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46233 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57978 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741849_1032 to mirror 127.0.0.1:46233 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,105 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:14,105 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57978 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:14,105 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741849_1032 2024-11-25T19:26:14,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57978 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57978 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:14,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741842_1025 (size=12106) 2024-11-25T19:26:14,106 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:14,114 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33219:33219)] 2024-11-25T19:26:14,114 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 is not closed yet, will try archiving it next time 2024-11-25T19:26:14,114 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36773 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,114 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562772026 is not closed yet, will try archiving it next time 2024-11-25T19:26:14,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57988 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741850_1033 to mirror 127.0.0.1:36773 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,114 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:14,114 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741850_1033 2024-11-25T19:26:14,114 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57988 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:14,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:57988 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57988 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,115 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:14,117 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,118 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:14,118 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741851_1034 2024-11-25T19:26:14,118 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:14,119 WARN [IPC Server handler 2 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T19:26:14,119 WARN [IPC Server handler 2 on default port 40559 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T19:26:14,120 WARN [IPC Server handler 2 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T19:26:14,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741852_1035 (size=10347) 2024-11-25T19:26:14,180 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
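When recovery runs out of candidates the client gives up, which is what the "All datanodes [...] are bad. Aborting..." entries record, while the BlockPlacementPolicyDefault warnings show the NameNode can no longer find a second DISK replica for new blocks. Whether a writer aborts or continues on a shrunken pipeline is a client-side choice; the sketch below only illustrates the standard HDFS client configuration keys that govern it. The hdfs://localhost:40559 URI is reused from the WAL paths above purely as an example, and relaxing these settings may well be beside the point for a test that deliberately kills datanodes.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PipelineFailureTolerantClient {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Keep replace-datanode-on-failure enabled, but let the writer continue
            // with fewer replicas instead of aborting when no replacement is found.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

            // NameNode URI taken from the log above; adjust to the cluster under test.
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40559"), conf)) {
                System.out.println("Connected to " + fs.getUri());
            }
        }
    }

With best-effort enabled the DataStreamer keeps writing to whatever replicas remain instead of surfacing the IOException seen here, at the cost of weaker durability until re-replication catches up.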
2024-11-25T19:26:14,508 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 is not closed yet, will try archiving it next time 2024-11-25T19:26:14,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/75e87558d04342a7830dbd27b558ed86 2024-11-25T19:26:14,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/75e87558d04342a7830dbd27b558ed86 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/75e87558d04342a7830dbd27b558ed86 2024-11-25T19:26:14,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/75e87558d04342a7830dbd27b558ed86, entries=5, sequenceid=11, filesize=10.1 K 2024-11-25T19:26:14,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 5500a35790ae594e549e5c2f71f85faa in 478ms, sequenceid=11, compaction requested=false 2024-11-25T19:26:14,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5500a35790ae594e549e5c2f71f85faa: 2024-11-25T19:26:14,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38981 {}] regionserver.HRegion(8855): Flush requested on 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:26:14,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5500a35790ae594e549e5c2f71f85faa 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-25T19:26:14,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/09e337a606fc4ae797906aeec9d71a9e is 1080, key is row0007/info:/1732562774072/Put/seqid=0 2024-11-25T19:26:14,745 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,745 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:14,745 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741853_1036 2024-11-25T19:26:14,746 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:14,747 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,748 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:14,748 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741854_1037 2024-11-25T19:26:14,748 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:14,750 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,750 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58020 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741855_1038 to mirror 127.0.0.1:34341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,751 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:14,751 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741855_1038 2024-11-25T19:26:14,751 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58020 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:14,751 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58020 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58020 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:14,751 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:14,752 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:14,753 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 
2024-11-25T19:26:14,753 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741856_1039 2024-11-25T19:26:14,753 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:14,754 WARN [IPC Server handler 3 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T19:26:14,754 WARN [IPC Server handler 3 on default port 40559 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T19:26:14,754 WARN [IPC Server handler 3 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T19:26:14,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741857_1040 (size=12506) 2024-11-25T19:26:15,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/09e337a606fc4ae797906aeec9d71a9e 2024-11-25T19:26:15,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/09e337a606fc4ae797906aeec9d71a9e as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e 2024-11-25T19:26:15,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e, entries=7, sequenceid=24, filesize=12.2 K 2024-11-25T19:26:15,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 5500a35790ae594e549e5c2f71f85faa in 443ms, sequenceid=24, compaction requested=false 2024-11-25T19:26:15,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
5500a35790ae594e549e5c2f71f85faa: 2024-11-25T19:26:15,178 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-25T19:26:15,178 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:15,178 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e because midkey is the same as first or last row 2024-11-25T19:26:15,352 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,115 WARN [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]] 2024-11-25T19:26:16,115 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,115 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C38981%2C1732562756052:(num 1732562774050) roll requested 2024-11-25T19:26:16,116 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38981%2C1732562756052.1732562776116 2024-11-25T19:26:16,122 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,122 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:16,122 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741858_1041 2024-11-25T19:26:16,123 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:16,125 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,125 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:16,126 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741859_1042 2024-11-25T19:26:16,126 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:16,128 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,129 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:16,129 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741860_1043 2024-11-25T19:26:16,129 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:16,132 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36373 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58048 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741861_1044 to mirror 127.0.0.1:36373 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,132 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:16,132 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58048 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T19:26:16,132 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741861_1044 2024-11-25T19:26:16,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58048 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58048 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
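These DataXceiver(892) errors are the first datanode in the pipeline (127.0.0.1:34241) failing to open a connection to its mirror, so the "Connection refused" refers to the downstream datanodes rather than the node doing the logging. A quick way to confirm which transfer ports are still listening while a run like this is in progress is a plain TCP probe; the sketch below uses only the JDK, with the port list copied from the addresses in these entries and an arbitrary 500 ms timeout.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class DatanodePortProbe {
        public static void main(String[] args) {
            // Datanode transfer ports that appear in the pipeline errors above.
            int[] ports = {34241, 46233, 36773, 36373, 34341};
            for (int port : ports) {
                try (Socket s = new Socket()) {
                    s.connect(new InetSocketAddress("127.0.0.1", port), 500); // 500 ms timeout
                    System.out.println(port + " reachable");
                } catch (IOException e) {
                    // "Connection refused" here matches the DataXceiver mirror errors in the log.
                    System.out.println(port + " unreachable: " + e.getMessage());
                }
            }
        }
    }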
2024-11-25T19:26:16,133 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:16,134 WARN [IPC Server handler 0 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T19:26:16,134 WARN [IPC Server handler 0 on default port 40559 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T19:26:16,134 WARN [IPC Server handler 0 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T19:26:16,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:16,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:16,137 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:16,137 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:16,137 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:16,137 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562774050 with entries=13, filesize=12.50 KB; new WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562776116 2024-11-25T19:26:16,138 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33219:33219)] 2024-11-25T19:26:16,138 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 is not closed yet, will try archiving it next time 2024-11-25T19:26:16,138 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562774050 is not closed yet, will try archiving it next time 2024-11-25T19:26:16,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741847_1030 (size=12810) 2024-11-25T19:26:16,140 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562768010 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs/6ef6ccb75414%2C38981%2C1732562756052.1732562768010 2024-11-25T19:26:16,142 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562772026 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs/6ef6ccb75414%2C38981%2C1732562756052.1732562772026 2024-11-25T19:26:16,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38981 {}] regionserver.HRegion(8855): Flush requested on 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:26:16,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5500a35790ae594e549e5c2f71f85faa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T19:26:16,180 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/a1d5c96c616c4f48912d33f27c61a5d2 is 1079, key is tmprow/info:/1732562776175/Put/seqid=0 2024-11-25T19:26:16,184 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36773 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:16,184 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58072 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741863_1046 to mirror 127.0.0.1:36773 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,184 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:16,184 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741863_1046 2024-11-25T19:26:16,184 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58072 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:16,184 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58072 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58072 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,184 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:16,186 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,186 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:16,186 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741864_1047 2024-11-25T19:26:16,186 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:16,188 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36373 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:16,188 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58080 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741865_1048 to mirror 127.0.0.1:36373 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,188 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:16,188 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741865_1048 2024-11-25T19:26:16,188 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58080 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:16,188 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58080 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58080 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,189 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:16,191 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46233 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,191 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58096 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741866_1049 to mirror 127.0.0.1:46233 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:16,191 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:16,191 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741866_1049 2024-11-25T19:26:16,191 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58096 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:16,191 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58096 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58096 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:16,192 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:16,192 WARN [IPC Server handler 4 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T19:26:16,192 WARN [IPC Server handler 4 on default port 40559 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T19:26:16,193 WARN [IPC Server handler 4 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T19:26:16,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741867_1050 (size=6027) 2024-11-25T19:26:16,540 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 is not closed yet, will try archiving it next time 2024-11-25T19:26:16,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/a1d5c96c616c4f48912d33f27c61a5d2 2024-11-25T19:26:16,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/a1d5c96c616c4f48912d33f27c61a5d2 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a1d5c96c616c4f48912d33f27c61a5d2 2024-11-25T19:26:16,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a1d5c96c616c4f48912d33f27c61a5d2, entries=1, sequenceid=34, filesize=5.9 K 2024-11-25T19:26:16,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 5500a35790ae594e549e5c2f71f85faa in 440ms, 
sequenceid=34, compaction requested=true 2024-11-25T19:26:16,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5500a35790ae594e549e5c2f71f85faa: 2024-11-25T19:26:16,617 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-25T19:26:16,617 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:16,617 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e because midkey is the same as first or last row 2024-11-25T19:26:16,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5500a35790ae594e549e5c2f71f85faa:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:26:16,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:26:16,617 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:26:16,619 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:26:16,619 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HStore(1541): 5500a35790ae594e549e5c2f71f85faa/info is initiating minor compaction (all files) 2024-11-25T19:26:16,619 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5500a35790ae594e549e5c2f71f85faa/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 
2024-11-25T19:26:16,619 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/75e87558d04342a7830dbd27b558ed86, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a1d5c96c616c4f48912d33f27c61a5d2] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp, totalSize=28.2 K 2024-11-25T19:26:16,619 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.Compactor(225): Compacting 75e87558d04342a7830dbd27b558ed86, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732562770039 2024-11-25T19:26:16,620 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09e337a606fc4ae797906aeec9d71a9e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732562774072 2024-11-25T19:26:16,620 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1d5c96c616c4f48912d33f27c61a5d2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732562776175 2024-11-25T19:26:16,634 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5500a35790ae594e549e5c2f71f85faa#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:26:16,635 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/2f11c840009f41408b6bd3f54cf69af2 is 1080, key is row0002/info:/1732562770039/Put/seqid=0 2024-11-25T19:26:16,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58128 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741868_1051 to mirror 127.0.0.1:36373 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,638 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36373 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,638 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58128 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:16,638 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:16,639 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741868_1051 2024-11-25T19:26:16,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58128 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58128 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,639 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:16,640 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,641 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:16,641 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741869_1052 2024-11-25T19:26:16,641 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:16,643 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46233 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:16,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58142 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741870_1053 to mirror 127.0.0.1:46233 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,643 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:16,643 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741870_1053 2024-11-25T19:26:16,643 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58142 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:16,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58142 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58142 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:16,644 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:16,646 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:16,646 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58152 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741871_1054 to mirror 127.0.0.1:34341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:16,646 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:16,646 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58152 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T19:26:16,646 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741871_1054 2024-11-25T19:26:16,646 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883769004_22 at /127.0.0.1:58152 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58152 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:16,647 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:16,647 WARN [IPC Server handler 1 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T19:26:16,648 WARN [IPC Server handler 1 on default port 40559 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T19:26:16,648 WARN [IPC Server handler 1 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T19:26:16,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741872_1055 (size=17994) 2024-11-25T19:26:16,796 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a7ca7f9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741842_1025 to 127.0.0.1:36773 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:16,796 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f0f37f0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741852_1035 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:17,066 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/2f11c840009f41408b6bd3f54cf69af2 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 2024-11-25T19:26:17,075 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5500a35790ae594e549e5c2f71f85faa/info of 5500a35790ae594e549e5c2f71f85faa into 2f11c840009f41408b6bd3f54cf69af2(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5500a35790ae594e549e5c2f71f85faa: 2024-11-25T19:26:17,076 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa., storeName=5500a35790ae594e549e5c2f71f85faa/info, priority=13, startTime=1732562776617; duration=0sec 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 because midkey is the same as first or last row 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 because midkey is the same as first or last row 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 because midkey is the same as first or last row 2024-11-25T19:26:17,076 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:26:17,077 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5500a35790ae594e549e5c2f71f85faa:info 2024-11-25T19:26:17,352 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:17,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38981 {}] regionserver.HRegion(8855): Flush requested on 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:26:17,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5500a35790ae594e549e5c2f71f85faa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T19:26:17,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/e33af4af3cad4e58a8d24c3deaa40022 is 1079, key is tmprow/info:/1732562777618/Put/seqid=0 2024-11-25T19:26:17,629 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:17,629 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]) is bad. 2024-11-25T19:26:17,629 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741873_1056 2024-11-25T19:26:17,630 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK] 2024-11-25T19:26:17,631 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:17,631 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:17,631 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741874_1057 2024-11-25T19:26:17,632 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:17,633 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:17,633 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK], DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:17,633 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741875_1058 2024-11-25T19:26:17,633 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:17,634 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:17,635 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]) is bad. 2024-11-25T19:26:17,635 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741876_1059 2024-11-25T19:26:17,635 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36773,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK] 2024-11-25T19:26:17,636 WARN [IPC Server handler 3 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T19:26:17,636 WARN [IPC Server handler 3 on default port 40559 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T19:26:17,636 WARN [IPC Server handler 3 on default port 40559 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T19:26:17,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741877_1060 (size=6027) 2024-11-25T19:26:17,795 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f0f37f0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741857_1040 to 127.0.0.1:46233 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:17,796 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a7ca7f9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741847_1030 to 127.0.0.1:46233 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:18,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/e33af4af3cad4e58a8d24c3deaa40022 2024-11-25T19:26:18,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/e33af4af3cad4e58a8d24c3deaa40022 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/e33af4af3cad4e58a8d24c3deaa40022 2024-11-25T19:26:18,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/e33af4af3cad4e58a8d24c3deaa40022, entries=1, sequenceid=45, filesize=5.9 K 2024-11-25T19:26:18,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 5500a35790ae594e549e5c2f71f85faa in 434ms, sequenceid=45, compaction requested=false 2024-11-25T19:26:18,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5500a35790ae594e549e5c2f71f85faa: 2024-11-25T19:26:18,055 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-25T19:26:18,055 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:18,056 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 because midkey is the same as first or last row 2024-11-25T19:26:18,141 WARN [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-25T19:26:18,141 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:18,180 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:18,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:18,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:18,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:18,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:18,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:26:18,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@792fa80c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:18,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d4ec789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:18,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5dd0b56c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/java.io.tmpdir/jetty-localhost-39173-hadoop-hdfs-3_4_1-tests_jar-_-any-12251198282079577961/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:18,357 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3740407e{HTTP/1.1, (http/1.1)}{localhost:39173} 2024-11-25T19:26:18,357 INFO [Time-limited test {}] server.Server(415): Started @128945ms 2024-11-25T19:26:18,359 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:18,435 WARN [Thread-980 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:26:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7a4df06b585064 with lease ID 0xcc28a8293c44895a: from storage DS-6f27d670-4d3c-4429-8270-08141043ae32 node DatanodeRegistration(127.0.0.1:45117, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=44245, infoSecurePort=0, ipcPort=45023, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7a4df06b585064 with lease ID 0xcc28a8293c44895a: from storage DS-cbd42eba-4bfa-41a8-b588-898defd70479 node DatanodeRegistration(127.0.0.1:45117, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=44245, infoSecurePort=0, ipcPort=45023, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:26:19,353 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:19,795 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f0f37f0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741872_1055 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:19,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741867_1050 (size=6027) 2024-11-25T19:26:20,141 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:20,181 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:20,795 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a7ca7f9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741877_1060 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:21,354 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:22,142 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:22,181 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:23,354 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:24,142 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:24,181 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:25,354 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:25,977 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T19:26:26,143 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:26,182 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:26,294 ERROR [FSHLog-0-hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData-prefix:6ef6ccb75414,40353,1732562756001 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:26,294 WARN [FSHLog-0-hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData-prefix:6ef6ccb75414,40353,1732562756001 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:26,294 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C40353%2C1732562756001:(num 1732562756209) roll requested 2024-11-25T19:26:26,294 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C40353%2C1732562756001.1732562786294 2024-11-25T19:26:26,298 WARN [Thread-1001 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36373 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:26,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084990843_22 at /127.0.0.1:46286 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8]'}, localName='127.0.0.1:34241', datanodeUuid='aa9270de-d9bc-455a-91c7-f448a6b72532', xmitsInProgress=0}:Exception transferring block BP-985132614-172.17.0.2-1732562755277:blk_1073741878_1061 to mirror 127.0.0.1:36373 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:26,298 WARN [Thread-1001 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK]) is bad. 2024-11-25T19:26:26,299 WARN [Thread-1001 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741878_1061 2024-11-25T19:26:26,299 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1084990843_22 at /127.0.0.1:46286 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T19:26:26,299 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084990843_22 at /127.0.0.1:46286 [Receiving block BP-985132614-172.17.0.2-1732562755277:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:34241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46286 dst: /127.0.0.1:34241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:26,299 WARN [Thread-1001 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36373,DS-c6265e3c-00f6-47ca-97d6-a1a0caff7e31,DISK] 2024-11-25T19:26:26,303 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:26,304 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:26,304 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:26,304 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:26,304 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:26,304 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562786294 2024-11-25T19:26:26,305 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:26,305 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:26,305 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 2024-11-25T19:26:26,305 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33219:33219),(127.0.0.1/127.0.0.1:44245:44245)] 2024-11-25T19:26:26,305 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 is not closed yet, will try archiving it next time 2024-11-25T19:26:26,305 WARN [IPC Server handler 2 on default port 40559 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-11-25T19:26:26,306 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 after 1ms 2024-11-25T19:26:27,355 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:28,143 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:28,458 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1605c33c {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:46233,null,null]) java.net.ConnectException: Call From 6ef6ccb75414/172.17.0.2 to localhost:32821 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-25T19:26:28,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741832_1019 (size=455) 2024-11-25T19:26:29,050 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs/6ef6ccb75414%2C38981%2C1732562756052.1732562756536 2024-11-25T19:26:29,053 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562774050 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs/6ef6ccb75414%2C38981%2C1732562756052.1732562774050 2024-11-25T19:26:29,355 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:29,443 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27d937e9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45117, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=44245, infoSecurePort=0, ipcPort=45023, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741832_1019 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:30,143 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:30,308 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/WALs/6ef6ccb75414,40353,1732562756001/6ef6ccb75414%2C40353%2C1732562756001.1732562756209 after 4003ms 2024-11-25T19:26:31,356 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:32,144 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:32,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741835_1011 (size=393) 2024-11-25T19:26:32,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741833_1009 (size=32) 2024-11-25T19:26:33,356 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:33,443 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27d937e9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45117, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=44245, infoSecurePort=0, ipcPort=45023, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741829_1005 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:33,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:26:33,944 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38981%2C1732562756052.1732562793944 2024-11-25T19:26:33,949 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:33,949 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:33,950 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741880_1064 2024-11-25T19:26:33,951 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:33,958 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:33,958 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:33,958 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:33,958 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:33,958 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:33,959 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562776116 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562793944 2024-11-25T19:26:33,959 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44245:44245),(127.0.0.1/127.0.0.1:33219:33219)] 2024-11-25T19:26:33,960 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562776116 is not closed yet, will try archiving it next time 2024-11-25T19:26:33,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741862_1045 (size=13591) 2024-11-25T19:26:33,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38981 {}] regionserver.HRegion(8855): Flush requested on 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:26:33,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5500a35790ae594e549e5c2f71f85faa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T19:26:33,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/bdf99a194f53489795eaa7d5bc7c6382 is 1080, key is row0013/info:/1732562793961/Put/seqid=0 2024-11-25T19:26:33,977 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:33,978 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:45117,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 2024-11-25T19:26:33,978 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741882_1066 2024-11-25T19:26:33,978 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:33,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741883_1067 (size=11421) 2024-11-25T19:26:33,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741883_1067 (size=11421) 2024-11-25T19:26:33,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/bdf99a194f53489795eaa7d5bc7c6382 2024-11-25T19:26:33,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/bdf99a194f53489795eaa7d5bc7c6382 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/bdf99a194f53489795eaa7d5bc7c6382 2024-11-25T19:26:33,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/bdf99a194f53489795eaa7d5bc7c6382, entries=6, sequenceid=55, filesize=11.2 K 2024-11-25T19:26:33,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 5500a35790ae594e549e5c2f71f85faa in 28ms, sequenceid=55, compaction requested=true 2024-11-25T19:26:33,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5500a35790ae594e549e5c2f71f85faa: 2024-11-25T19:26:33,998 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-25T19:26:33,998 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
2024-11-25T19:26:33,998 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 because midkey is the same as first or last row 2024-11-25T19:26:33,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5500a35790ae594e549e5c2f71f85faa:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:26:33,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:26:33,998 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:26:34,000 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:26:34,000 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HStore(1541): 5500a35790ae594e549e5c2f71f85faa/info is initiating minor compaction (all files) 2024-11-25T19:26:34,000 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5500a35790ae594e549e5c2f71f85faa/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:26:34,000 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/e33af4af3cad4e58a8d24c3deaa40022, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/bdf99a194f53489795eaa7d5bc7c6382] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp, totalSize=34.6 K 2024-11-25T19:26:34,000 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f11c840009f41408b6bd3f54cf69af2, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732562770039 2024-11-25T19:26:34,001 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.Compactor(225): Compacting e33af4af3cad4e58a8d24c3deaa40022, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732562777618 2024-11-25T19:26:34,001 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] compactions.Compactor(225): Compacting bdf99a194f53489795eaa7d5bc7c6382, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732562778030 2024-11-25T19:26:34,018 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 5500a35790ae594e549e5c2f71f85faa#info#compaction#24 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:26:34,018 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/b2824607230440a3aa523ea68f7b8899 is 1080, key is row0002/info:/1732562770039/Put/seqid=0 2024-11-25T19:26:34,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741884_1068 (size=23502) 2024-11-25T19:26:34,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741884_1068 (size=23502) 2024-11-25T19:26:34,031 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/b2824607230440a3aa523ea68f7b8899 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/b2824607230440a3aa523ea68f7b8899 2024-11-25T19:26:34,038 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5500a35790ae594e549e5c2f71f85faa/info of 5500a35790ae594e549e5c2f71f85faa into b2824607230440a3aa523ea68f7b8899(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5500a35790ae594e549e5c2f71f85faa: 2024-11-25T19:26:34,038 INFO [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa., storeName=5500a35790ae594e549e5c2f71f85faa/info, priority=13, startTime=1732562793998; duration=0sec 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/b2824607230440a3aa523ea68f7b8899 because midkey is the same as first or last row 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/b2824607230440a3aa523ea68f7b8899 because midkey is the same as first or last row 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/b2824607230440a3aa523ea68f7b8899 because midkey is the same as first or last row 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:26:34,038 DEBUG [RS:0;6ef6ccb75414:38981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5500a35790ae594e549e5c2f71f85faa:info 2024-11-25T19:26:34,145 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-25T19:26:34,145 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T19:26:34,185 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T19:26:34,185 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:26:34,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:34,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:34,185 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T19:26:34,186 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T19:26:34,186 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1403695941, stopped=false 2024-11-25T19:26:34,186 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ef6ccb75414,40353,1732562756001 2024-11-25T19:26:34,187 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:26:34,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:26:34,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x1007858fdfb0002, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:26:34,187 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:34,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:34,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x1007858fdfb0002, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:34,188 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:26:34,188 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T19:26:34,188 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:26:34,188 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:34,188 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:26:34,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37563-0x1007858fdfb0002, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:26:34,189 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,38981,1732562756052' ***** 2024-11-25T19:26:34,189 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:26:34,189 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,37563,1732562757286' ***** 2024-11-25T19:26:34,189 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:26:34,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:26:34,189 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:26:34,189 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:26:34,189 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:26:34,189 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:26:34,189 INFO [RS:0;6ef6ccb75414:38981 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:26:34,190 INFO [RS:0;6ef6ccb75414:38981 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T19:26:34,190 INFO [RS:1;6ef6ccb75414:37563 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:26:34,190 INFO [RS:1;6ef6ccb75414:37563 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T19:26:34,190 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,37563,1732562757286 2024-11-25T19:26:34,190 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:26:34,190 INFO [RS:1;6ef6ccb75414:37563 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;6ef6ccb75414:37563. 
2024-11-25T19:26:34,190 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(3091): Received CLOSE for 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:26:34,190 DEBUG [RS:1;6ef6ccb75414:37563 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:26:34,190 DEBUG [RS:1;6ef6ccb75414:37563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:34,190 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,37563,1732562757286; all regions closed. 2024-11-25T19:26:34,190 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,38981,1732562756052 2024-11-25T19:26:34,190 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:26:34,191 INFO [RS:0;6ef6ccb75414:38981 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ef6ccb75414:38981. 
2024-11-25T19:26:34,191 DEBUG [RS:0;6ef6ccb75414:38981 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:26:34,191 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5500a35790ae594e549e5c2f71f85faa, disabling compactions & flushes 2024-11-25T19:26:34,191 DEBUG [RS:0;6ef6ccb75414:38981 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:34,191 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:26:34,191 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,191 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:26:34,191 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T19:26:34,191 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. after waiting 0 ms 2024-11-25T19:26:34,191 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T19:26:34,191 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,191 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:26:34,191 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T19:26:34,191 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,191 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T19:26:34,191 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5500a35790ae594e549e5c2f71f85faa 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-25T19:26:34,191 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T19:26:34,191 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5500a35790ae594e549e5c2f71f85faa=TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.} 2024-11-25T19:26:34,191 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:26:34,191 DEBUG [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5500a35790ae594e549e5c2f71f85faa 2024-11-25T19:26:34,191 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:26:34,192 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:26:34,192 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:26:34,192 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,192 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:26:34,192 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,192 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-25T19:26:34,192 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,192 ERROR [FSHLog-0-hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555-prefix:6ef6ccb75414,38981,1732562756052.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,192 WARN [FSHLog-0-hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555-prefix:6ef6ccb75414,38981,1732562756052.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,192 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,192 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 2024-11-25T19:26:34,192 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C38981%2C1732562756052.meta:.meta(num 1732562757075) roll requested 2024-11-25T19:26:34,193 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38981%2C1732562756052.meta.1732562794192.meta 2024-11-25T19:26:34,193 WARN [IPC Server handler 3 on default port 40559 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 has not been closed. Lease recovery is in progress. 
RecoveryId = 1069 for block blk_1073741837_1013 2024-11-25T19:26:34,193 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 after 1ms 2024-11-25T19:26:34,196 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/a0d16e25b817443889fe11d89674b3c8 is 1080, key is row0018/info:/1732562793971/Put/seqid=0 2024-11-25T19:26:34,199 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,199 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,200 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,200 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,200 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,200 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562794192.meta 2024-11-25T19:26:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741886_1071 (size=11421) 2024-11-25T19:26:34,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741886_1071 (size=11421) 2024-11-25T19:26:34,201 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,201 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46233,DS-781665b5-4509-4c5d-b6a1-617bec384944,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,201 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta 2024-11-25T19:26:34,201 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44245:44245),(127.0.0.1/127.0.0.1:33219:33219)] 2024-11-25T19:26:34,201 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta is not closed yet, will try archiving it next time 2024-11-25T19:26:34,201 WARN [IPC Server handler 3 on default port 40559 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741834_1010 2024-11-25T19:26:34,201 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/a0d16e25b817443889fe11d89674b3c8 2024-11-25T19:26:34,202 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta after 0ms 2024-11-25T19:26:34,208 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/.tmp/info/a0d16e25b817443889fe11d89674b3c8 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a0d16e25b817443889fe11d89674b3c8 2024-11-25T19:26:34,214 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a0d16e25b817443889fe11d89674b3c8, entries=6, sequenceid=65, filesize=11.2 K 2024-11-25T19:26:34,216 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 5500a35790ae594e549e5c2f71f85faa in 24ms, sequenceid=65, compaction requested=false 2024-11-25T19:26:34,216 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/75e87558d04342a7830dbd27b558ed86, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a1d5c96c616c4f48912d33f27c61a5d2, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/e33af4af3cad4e58a8d24c3deaa40022, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/bdf99a194f53489795eaa7d5bc7c6382] to archive 2024-11-25T19:26:34,217 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/info/1c7a115c6d7f4f088c870fc79d81268e is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa./info:regioninfo/1732562757756/Put/seqid=0 2024-11-25T19:26:34,217 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T19:26:34,218 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,219 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741887_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:34241,DS-7bfb59bf-4348-4153-aff5-208d46d6ffb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 
2024-11-25T19:26:34,219 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741887_1073 2024-11-25T19:26:34,219 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/75e87558d04342a7830dbd27b558ed86 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/75e87558d04342a7830dbd27b558ed86 2024-11-25T19:26:34,219 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:34,220 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/09e337a606fc4ae797906aeec9d71a9e 2024-11-25T19:26:34,222 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/2f11c840009f41408b6bd3f54cf69af2 2024-11-25T19:26:34,223 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a1d5c96c616c4f48912d33f27c61a5d2 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/a1d5c96c616c4f48912d33f27c61a5d2 2024-11-25T19:26:34,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741888_1074 (size=7089) 2024-11-25T19:26:34,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741888_1074 (size=7089) 2024-11-25T19:26:34,225 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/info/1c7a115c6d7f4f088c870fc79d81268e 
2024-11-25T19:26:34,225 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/e33af4af3cad4e58a8d24c3deaa40022 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/e33af4af3cad4e58a8d24c3deaa40022 2024-11-25T19:26:34,227 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/bdf99a194f53489795eaa7d5bc7c6382 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/info/bdf99a194f53489795eaa7d5bc7c6382 2024-11-25T19:26:34,227 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6ef6ccb75414:40353 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T19:26:34,227 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [75e87558d04342a7830dbd27b558ed86=10347, 09e337a606fc4ae797906aeec9d71a9e=12506, 2f11c840009f41408b6bd3f54cf69af2=17994, a1d5c96c616c4f48912d33f27c61a5d2=6027, e33af4af3cad4e58a8d24c3deaa40022=6027, bdf99a194f53489795eaa7d5bc7c6382=11421] 2024-11-25T19:26:34,231 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5500a35790ae594e549e5c2f71f85faa/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-25T19:26:34,232 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 2024-11-25T19:26:34,232 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5500a35790ae594e549e5c2f71f85faa: Waiting for close lock at 1732562794191Running coprocessor pre-close hooks at 1732562794191Disabling compacts and flushes for region at 1732562794191Disabling writes for close at 1732562794191Obtaining lock to block concurrent updates at 1732562794191Preparing flush snapshotting stores in 5500a35790ae594e549e5c2f71f85faa at 1732562794191Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1732562794191Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. at 1732562794192 (+1 ms)Flushing 5500a35790ae594e549e5c2f71f85faa/info: creating writer at 1732562794192Flushing 5500a35790ae594e549e5c2f71f85faa/info: appending metadata at 1732562794195 (+3 ms)Flushing 5500a35790ae594e549e5c2f71f85faa/info: closing flushed file at 1732562794195Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38f6de1d: reopening flushed file at 1732562794207 (+12 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 5500a35790ae594e549e5c2f71f85faa in 24ms, sequenceid=65, compaction requested=false at 1732562794216 (+9 ms)Writing region close event to WAL at 1732562794228 (+12 ms)Running coprocessor post-close hooks at 1732562794232 (+4 ms)Closed at 1732562794232 2024-11-25T19:26:34,232 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732562757383.5500a35790ae594e549e5c2f71f85faa. 
2024-11-25T19:26:34,251 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/ns/da6cda9afbd44ee797cde38bbb3e3077 is 43, key is default/ns:d/1732562757135/Put/seqid=0 2024-11-25T19:26:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741889_1075 (size=5153) 2024-11-25T19:26:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741889_1075 (size=5153) 2024-11-25T19:26:34,257 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/ns/da6cda9afbd44ee797cde38bbb3e3077 2024-11-25T19:26:34,275 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/table/748fe55f7be445f78bc2a63c4f97f281 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732562757773/Put/seqid=0 2024-11-25T19:26:34,277 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:34,277 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985132614-172.17.0.2-1732562755277:blk_1073741890_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK], DatanodeInfoWithStorage[127.0.0.1:45117,DS-6f27d670-4d3c-4429-8270-08141043ae32,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK]) is bad. 
2024-11-25T19:26:34,277 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-985132614-172.17.0.2-1732562755277:blk_1073741890_1076 2024-11-25T19:26:34,278 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34341,DS-1b7f4d92-cbcf-4d39-947c-f9f4cb5b904f,DISK] 2024-11-25T19:26:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741891_1077 (size=5424) 2024-11-25T19:26:34,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741891_1077 (size=5424) 2024-11-25T19:26:34,283 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/table/748fe55f7be445f78bc2a63c4f97f281 2024-11-25T19:26:34,289 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/info/1c7a115c6d7f4f088c870fc79d81268e as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/info/1c7a115c6d7f4f088c870fc79d81268e 2024-11-25T19:26:34,295 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/info/1c7a115c6d7f4f088c870fc79d81268e, entries=10, sequenceid=11, filesize=6.9 K 2024-11-25T19:26:34,296 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/ns/da6cda9afbd44ee797cde38bbb3e3077 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/ns/da6cda9afbd44ee797cde38bbb3e3077 2024-11-25T19:26:34,302 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/ns/da6cda9afbd44ee797cde38bbb3e3077, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T19:26:34,303 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/.tmp/table/748fe55f7be445f78bc2a63c4f97f281 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/table/748fe55f7be445f78bc2a63c4f97f281 2024-11-25T19:26:34,309 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/table/748fe55f7be445f78bc2a63c4f97f281, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T19:26:34,310 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false 2024-11-25T19:26:34,315 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T19:26:34,316 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:26:34,316 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:26:34,316 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562794191Running coprocessor pre-close hooks at 1732562794191Disabling compacts and flushes for region at 1732562794191Disabling writes for close at 1732562794192 (+1 ms)Obtaining lock to block concurrent updates at 1732562794192Preparing flush snapshotting stores in 1588230740 at 1732562794192Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732562794192Flushing stores of hbase:meta,,1.1588230740 at 1732562794201 (+9 ms)Flushing 1588230740/info: creating writer at 1732562794202 (+1 ms)Flushing 1588230740/info: appending metadata at 1732562794216 (+14 ms)Flushing 1588230740/info: closing flushed file at 1732562794216Flushing 1588230740/ns: creating writer at 1732562794232 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732562794250 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732562794250Flushing 1588230740/table: creating writer at 1732562794262 (+12 ms)Flushing 1588230740/table: appending metadata at 1732562794275 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732562794275Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55aad2ee: reopening flushed file at 1732562794288 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24eb2659: reopening flushed file at 1732562794295 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71d98edb: reopening flushed file at 1732562794302 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false at 1732562794310 (+8 ms)Writing region close event to WAL at 1732562794312 (+2 ms)Running coprocessor post-close hooks at 1732562794316 (+4 ms)Closed at 1732562794316 2024-11-25T19:26:34,316 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T19:26:34,353 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T19:26:34,353 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T19:26:34,361 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.1732562776116 to hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs/6ef6ccb75414%2C38981%2C1732562756052.1732562776116 2024-11-25T19:26:34,387 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T19:26:34,387 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T19:26:34,389 INFO [regionserver/6ef6ccb75414:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:26:34,392 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,38981,1732562756052; all regions closed. 2024-11-25T19:26:34,392 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,392 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,392 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,393 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,393 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:34,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741885_1070 (size=825) 2024-11-25T19:26:34,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741885_1070 (size=825) 2024-11-25T19:26:34,799 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a7ca7f9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34241, datanodeUuid=aa9270de-d9bc-455a-91c7-f448a6b72532, infoPort=33219, infoSecurePort=0, ipcPort=41789, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741862_1045 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:35,355 INFO [regionserver/6ef6ccb75414:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:26:35,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T19:26:35,387 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:26:35,387 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:26:35,441 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27d937e9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45117, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=44245, infoSecurePort=0, ipcPort=45023, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741828_1004 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:35,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741836_1012 (size=76) 2024-11-25T19:26:36,440 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e8e1abd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45117, datanodeUuid=85f1322b-a253-4d11-a769-38483729e20a, infoPort=44245, infoSecurePort=0, ipcPort=45023, storageInfo=lv=-57;cid=testClusterID;nsid=721132528;c=1732562755277):Failed to transfer BP-985132614-172.17.0.2-1732562755277:blk_1073741826_1002 to 127.0.0.1:34341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:37,640 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T19:26:37,640 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T19:26:38,195 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 after 4003ms 2024-11-25T19:26:38,203 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta after 4002ms 2024-11-25T19:26:38,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:26:38,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:26:38,462 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36b8b0c8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-985132614-172.17.0.2-1732562755277:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:46233,null,null]) java.net.ConnectException: Call From 6ef6ccb75414/172.17.0.2 to localhost:32821 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-25T19:26:39,193 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-25T19:26:39,199 DEBUG [RS:1;6ef6ccb75414:37563 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs 2024-11-25T19:26:39,199 INFO [RS:1;6ef6ccb75414:37563 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C37563%2C1732562757286:(num 1732562757484) 2024-11-25T19:26:39,199 DEBUG [RS:1;6ef6ccb75414:37563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:39,199 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:26:39,200 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:26:39,200 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T19:26:39,201 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T19:26:39,201 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:26:39,201 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-25T19:26:39,201 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T19:26:39,201 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:26:39,201 INFO [RS:1;6ef6ccb75414:37563 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37563 2024-11-25T19:26:39,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x1007858fdfb0002, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,37563,1732562757286 2024-11-25T19:26:39,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:26:39,204 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:26:39,206 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,37563,1732562757286] 2024-11-25T19:26:39,207 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,37563,1732562757286 already deleted, retry=false 2024-11-25T19:26:39,207 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,37563,1732562757286 expired; onlineServers=1 2024-11-25T19:26:39,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x1007858fdfb0002, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:26:39,306 INFO [RS:1;6ef6ccb75414:37563 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:26:39,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x1007858fdfb0002, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:26:39,306 INFO [RS:1;6ef6ccb75414:37563 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,37563,1732562757286; zookeeper connection closed. 2024-11-25T19:26:39,306 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@73b0ad3e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@73b0ad3e 2024-11-25T19:26:39,393 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-25T19:26:39,400 DEBUG [RS:0;6ef6ccb75414:38981 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs 2024-11-25T19:26:39,400 INFO [RS:0;6ef6ccb75414:38981 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C38981%2C1732562756052.meta:.meta(num 1732562794192) 2024-11-25T19:26:39,401 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,401 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,401 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,402 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,402 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741881_1065 (size=15140) 2024-11-25T19:26:39,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741881_1065 (size=15140) 2024-11-25T19:26:39,406 DEBUG [RS:0;6ef6ccb75414:38981 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/oldWALs 2024-11-25T19:26:39,406 INFO [RS:0;6ef6ccb75414:38981 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C38981%2C1732562756052:(num 1732562793944) 2024-11-25T19:26:39,406 DEBUG [RS:0;6ef6ccb75414:38981 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:39,406 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:26:39,406 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:26:39,406 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T19:26:39,407 INFO [RS:0;6ef6ccb75414:38981 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:26:39,407 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:26:39,407 INFO [RS:0;6ef6ccb75414:38981 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38981 2024-11-25T19:26:39,408 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,38981,1732562756052 2024-11-25T19:26:39,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:26:39,408 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:26:39,409 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,38981,1732562756052] 2024-11-25T19:26:39,409 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,38981,1732562756052 already deleted, retry=false 2024-11-25T19:26:39,409 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,38981,1732562756052 expired; onlineServers=0 2024-11-25T19:26:39,409 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ef6ccb75414,40353,1732562756001' ***** 2024-11-25T19:26:39,409 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T19:26:39,409 INFO [M:0;6ef6ccb75414:40353 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:26:39,409 INFO [M:0;6ef6ccb75414:40353 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:26:39,410 DEBUG [M:0;6ef6ccb75414:40353 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T19:26:39,410 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T19:26:39,410 DEBUG [M:0;6ef6ccb75414:40353 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T19:26:39,410 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562756306 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562756306,5,FailOnTimeoutGroup] 2024-11-25T19:26:39,410 INFO [M:0;6ef6ccb75414:40353 {}] hbase.ChoreService(370): Chore service for: master/6ef6ccb75414:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T19:26:39,410 INFO [M:0;6ef6ccb75414:40353 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:26:39,410 DEBUG [M:0;6ef6ccb75414:40353 {}] master.HMaster(1795): Stopping service threads 2024-11-25T19:26:39,410 INFO [M:0;6ef6ccb75414:40353 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T19:26:39,410 INFO [M:0;6ef6ccb75414:40353 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:26:39,410 INFO [M:0;6ef6ccb75414:40353 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T19:26:39,410 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562756309 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562756309,5,FailOnTimeoutGroup] 2024-11-25T19:26:39,410 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T19:26:39,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T19:26:39,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:39,411 DEBUG [M:0;6ef6ccb75414:40353 {}] zookeeper.ZKUtil(347): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T19:26:39,411 WARN [M:0;6ef6ccb75414:40353 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T19:26:39,411 INFO [M:0;6ef6ccb75414:40353 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/.lastflushedseqids 2024-11-25T19:26:39,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741892_1078 (size=130) 2024-11-25T19:26:39,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741892_1078 (size=130) 2024-11-25T19:26:39,417 INFO [M:0;6ef6ccb75414:40353 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T19:26:39,417 INFO [M:0;6ef6ccb75414:40353 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T19:26:39,418 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:26:39,418 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:26:39,418 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:26:39,418 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:26:39,418 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:26:39,418 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-25T19:26:39,433 DEBUG [M:0;6ef6ccb75414:40353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a86a4272321b4d7583844d04f042ccef is 82, key is hbase:meta,,1/info:regioninfo/1732562757117/Put/seqid=0 2024-11-25T19:26:39,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741893_1079 (size=5672) 2024-11-25T19:26:39,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741893_1079 (size=5672) 2024-11-25T19:26:39,438 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a86a4272321b4d7583844d04f042ccef 2024-11-25T19:26:39,458 DEBUG [M:0;6ef6ccb75414:40353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e3daee2612974b8e9b2884ca42e85db1 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732562757780/Put/seqid=0 2024-11-25T19:26:39,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741894_1080 (size=6255) 2024-11-25T19:26:39,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741894_1080 (size=6255) 2024-11-25T19:26:39,463 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e3daee2612974b8e9b2884ca42e85db1 2024-11-25T19:26:39,469 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e3daee2612974b8e9b2884ca42e85db1 2024-11-25T19:26:39,484 DEBUG [M:0;6ef6ccb75414:40353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/01c563cbbb834d46a48b272963d0a1dc is 69, key is 6ef6ccb75414,37563,1732562757286/rs:state/1732562757335/Put/seqid=0 2024-11-25T19:26:39,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741895_1081 (size=5224) 2024-11-25T19:26:39,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741895_1081 (size=5224) 2024-11-25T19:26:39,490 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/01c563cbbb834d46a48b272963d0a1dc 2024-11-25T19:26:39,509 DEBUG [M:0;6ef6ccb75414:40353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/028df7285e004b388ac9ccde4a9bf524 is 52, key is load_balancer_on/state:d/1732562757237/Put/seqid=0 2024-11-25T19:26:39,509 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:26:39,509 INFO [RS:0;6ef6ccb75414:38981 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:26:39,509 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38981-0x1007858fdfb0001, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:26:39,509 INFO [RS:0;6ef6ccb75414:38981 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,38981,1732562756052; zookeeper connection closed. 
2024-11-25T19:26:39,509 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c429ad6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c429ad6 2024-11-25T19:26:39,510 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-25T19:26:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741896_1082 (size=5056) 2024-11-25T19:26:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741896_1082 (size=5056) 2024-11-25T19:26:39,514 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/028df7285e004b388ac9ccde4a9bf524 2024-11-25T19:26:39,520 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a86a4272321b4d7583844d04f042ccef as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a86a4272321b4d7583844d04f042ccef 2024-11-25T19:26:39,526 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a86a4272321b4d7583844d04f042ccef, entries=8, sequenceid=60, filesize=5.5 K 2024-11-25T19:26:39,527 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e3daee2612974b8e9b2884ca42e85db1 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e3daee2612974b8e9b2884ca42e85db1 2024-11-25T19:26:39,533 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e3daee2612974b8e9b2884ca42e85db1 2024-11-25T19:26:39,533 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e3daee2612974b8e9b2884ca42e85db1, entries=6, sequenceid=60, filesize=6.1 K 2024-11-25T19:26:39,535 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/01c563cbbb834d46a48b272963d0a1dc as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/01c563cbbb834d46a48b272963d0a1dc 2024-11-25T19:26:39,542 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/01c563cbbb834d46a48b272963d0a1dc, entries=2, sequenceid=60, filesize=5.1 K 2024-11-25T19:26:39,543 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/028df7285e004b388ac9ccde4a9bf524 as hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/028df7285e004b388ac9ccde4a9bf524 2024-11-25T19:26:39,549 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/028df7285e004b388ac9ccde4a9bf524, entries=1, sequenceid=60, filesize=4.9 K 2024-11-25T19:26:39,550 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=60, compaction requested=false 2024-11-25T19:26:39,552 INFO [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:26:39,552 DEBUG [M:0;6ef6ccb75414:40353 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562799418Disabling compacts and flushes for region at 1732562799418Disabling writes for close at 1732562799418Obtaining lock to block concurrent updates at 1732562799418Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732562799418Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732562799418Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732562799419 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732562799419Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732562799432 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732562799432Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732562799443 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732562799457 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732562799457Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732562799469 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732562799484 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732562799484Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732562799495 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732562799508 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732562799508Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@562a4160: reopening flushed file at 1732562799519 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@621cfbe7: reopening flushed file at 1732562799526 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28650a78: reopening flushed file at 1732562799534 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40d1807d: reopening flushed file at 1732562799542 (+8 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=60, compaction requested=false at 1732562799550 (+8 ms)Writing region close event to WAL at 1732562799552 (+2 ms)Closed at 1732562799552 2024-11-25T19:26:39,553 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,554 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,554 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,554 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,554 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:39,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741879_1062 (size=1045) 2024-11-25T19:26:39,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34241 is added to blk_1073741879_1062 (size=1045) 2024-11-25T19:26:39,557 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:26:39,557 INFO [M:0;6ef6ccb75414:40353 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-25T19:26:39,557 INFO [M:0;6ef6ccb75414:40353 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40353 2024-11-25T19:26:39,557 INFO [M:0;6ef6ccb75414:40353 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:26:39,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:26:39,659 INFO [M:0;6ef6ccb75414:40353 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:26:39,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40353-0x1007858fdfb0000, quorum=127.0.0.1:56329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:26:39,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5dd0b56c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:39,664 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3740407e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:39,664 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:39,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d4ec789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:39,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@792fa80c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:39,666 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:39,666 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T19:26:39,666 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985132614-172.17.0.2-1732562755277 (Datanode Uuid 85f1322b-a253-4d11-a769-38483729e20a) service to localhost/127.0.0.1:40559 2024-11-25T19:26:39,666 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:39,666 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46233,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:32821 , LocalHost:localPort 6ef6ccb75414/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-25T19:26:39,666 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-985132614-172.17.0.2-1732562755277:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45117,null,null], DatanodeInfoWithStorage[127.0.0.1:46233,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-985132614-172.17.0.2-1732562755277 2024-11-25T19:26:39,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data3/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:39,666 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46233,null,null]) java.io.IOException: No block pool offer service for bpid=BP-985132614-172.17.0.2-1732562755277 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:39,667 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45117,null,null]) java.io.IOException: No block pool offer service for bpid=BP-985132614-172.17.0.2-1732562755277 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:39,667 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:46233,null,null], DatanodeInfoWithStorage[127.0.0.1:45117,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-985132614-172.17.0.2-1732562755277:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:46233,null,null], DatanodeInfoWithStorage[127.0.0.1:45117,null,null]] 2024-11-25T19:26:39,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data4/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:39,667 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:39,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@337c5dd4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:39,670 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b5be5aa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:39,670 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:39,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74c44b7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:39,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c1aeedb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:39,673 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:26:39,673 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:39,673 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:39,673 WARN [BP-985132614-172.17.0.2-1732562755277 heartbeating to localhost/127.0.0.1:40559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985132614-172.17.0.2-1732562755277 (Datanode Uuid aa9270de-d9bc-455a-91c7-f448a6b72532) service to localhost/127.0.0.1:40559 2024-11-25T19:26:39,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data7/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:39,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/cluster_620c101c-129e-6ef6-dee5-8bda1b072bdd/data/data8/current/BP-985132614-172.17.0.2-1732562755277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:39,674 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:39,680 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cd2a640{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:26:39,680 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:39,681 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:39,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:39,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:39,688 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T19:26:39,716 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T19:26:39,723 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 79) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33701 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40559 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40559 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:40559 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f9714bf55b8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:33701 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f9714bf55b8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40559 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: LeaseRenewer:jenkins.hfs.2@localhost:40559 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=122 (was 178), ProcessCount=11 (was 11), AvailableMemoryMB=5706 (was 5801) 2024-11-25T19:26:39,729 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=122, ProcessCount=11, AvailableMemoryMB=5706 2024-11-25T19:26:39,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T19:26:39,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.log.dir so I do NOT create it in target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac 2024-11-25T19:26:39,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7910f54c-b792-4e1c-e85a-5ab67b554044/hadoop.tmp.dir so I do NOT create it in target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c, deleteOnExit=true 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/test.cache.data in system properties and HBase conf 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir in system properties and HBase conf 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T19:26:39,730 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T19:26:39,730 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:26:39,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/dfs.datanode.shared.file.descriptor.paths in system 
properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/nfs.dump.dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T19:26:39,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T19:26:39,743 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:26:39,760 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:26:39,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:39,805 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:39,810 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:39,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:39,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:39,811 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:26:39,812 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:39,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c5497db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:39,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38a1581{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:39,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d54b888{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir/jetty-localhost-40175-hadoop-hdfs-3_4_1-tests_jar-_-any-3349659538042119365/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:26:39,905 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24710539{HTTP/1.1, (http/1.1)}{localhost:40175} 2024-11-25T19:26:39,905 INFO [Time-limited test {}] server.Server(415): Started @150493ms 2024-11-25T19:26:39,915 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:26:39,962 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:39,965 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:39,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:39,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:39,966 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:26:39,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b810f17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:39,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e97eedf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:40,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cf0fa2a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir/jetty-localhost-33903-hadoop-hdfs-3_4_1-tests_jar-_-any-5421309567457309079/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:40,061 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fd8c23{HTTP/1.1, (http/1.1)}{localhost:33903} 2024-11-25T19:26:40,061 INFO [Time-limited test {}] server.Server(415): Started @150649ms 2024-11-25T19:26:40,063 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:40,094 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:40,099 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:40,101 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:40,101 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:40,101 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:26:40,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14a79ae9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:40,103 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1974987b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:40,122 WARN [Thread-1180 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data1/current/BP-1990144314-172.17.0.2-1732562799754/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:40,122 WARN [Thread-1181 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data2/current/BP-1990144314-172.17.0.2-1732562799754/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:40,137 WARN [Thread-1159 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:26:40,139 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x504ef75cb0adbc8b with lease ID 0x93a34b58ead49f02: Processing first storage report for DS-f13b0552-e90a-4b76-906c-15ce447e345b from datanode DatanodeRegistration(127.0.0.1:36655, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=46595, infoSecurePort=0, ipcPort=38727, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754) 2024-11-25T19:26:40,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x504ef75cb0adbc8b with lease ID 0x93a34b58ead49f02: from storage DS-f13b0552-e90a-4b76-906c-15ce447e345b node DatanodeRegistration(127.0.0.1:36655, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=46595, infoSecurePort=0, ipcPort=38727, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:40,140 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x504ef75cb0adbc8b with lease ID 0x93a34b58ead49f02: Processing first storage report for DS-0949797b-4da7-4541-8047-23d4b2e49c73 from datanode DatanodeRegistration(127.0.0.1:36655, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=46595, infoSecurePort=0, ipcPort=38727, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754) 2024-11-25T19:26:40,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x504ef75cb0adbc8b with lease ID 0x93a34b58ead49f02: from storage DS-0949797b-4da7-4541-8047-23d4b2e49c73 node DatanodeRegistration(127.0.0.1:36655, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=46595, infoSecurePort=0, ipcPort=38727, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:40,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:40,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1cd5425{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir/jetty-localhost-44437-hadoop-hdfs-3_4_1-tests_jar-_-any-762483976103831486/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:40,200 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e63449e{HTTP/1.1, (http/1.1)}{localhost:44437} 2024-11-25T19:26:40,200 INFO [Time-limited test {}] server.Server(415): Started @150788ms 2024-11-25T19:26:40,201 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:40,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:40,259 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data3/current/BP-1990144314-172.17.0.2-1732562799754/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:40,259 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data4/current/BP-1990144314-172.17.0.2-1732562799754/current, will proceed with Du for space computation calculation, 2024-11-25T19:26:40,277 WARN [Thread-1195 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:26:40,279 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa826dcf50efd4f27 with lease ID 0x93a34b58ead49f03: Processing first storage report for DS-10949356-e571-40d5-89df-7c039cd88a0a from datanode DatanodeRegistration(127.0.0.1:38125, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=42355, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754) 2024-11-25T19:26:40,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa826dcf50efd4f27 with lease ID 0x93a34b58ead49f03: from storage DS-10949356-e571-40d5-89df-7c039cd88a0a node DatanodeRegistration(127.0.0.1:38125, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=42355, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:40,279 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa826dcf50efd4f27 with lease ID 0x93a34b58ead49f03: Processing first storage report for DS-375c955e-1fd4-42f3-84ab-356c4e08868b from datanode DatanodeRegistration(127.0.0.1:38125, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=42355, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754) 2024-11-25T19:26:40,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa826dcf50efd4f27 with lease ID 0x93a34b58ead49f03: from storage DS-375c955e-1fd4-42f3-84ab-356c4e08868b node DatanodeRegistration(127.0.0.1:38125, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=42355, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:40,326 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac 2024-11-25T19:26:40,330 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/zookeeper_0, clientPort=63006, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T19:26:40,331 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63006 2024-11-25T19:26:40,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:40,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:40,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:26:40,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:26:40,344 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef with version=8 2024-11-25T19:26:40,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase-staging 2024-11-25T19:26:40,346 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:26:40,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:26:40,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:26:40,346 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:26:40,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:26:40,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:26:40,346 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T19:26:40,347 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:26:40,347 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33297 2024-11-25T19:26:40,349 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33297 connecting to ZooKeeper ensemble=127.0.0.1:63006 2024-11-25T19:26:40,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:332970x0, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:26:40,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33297-0x1007859ab370000 connected 2024-11-25T19:26:40,367 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:40,369 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:40,373 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:26:40,373 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef, hbase.cluster.distributed=false 2024-11-25T19:26:40,375 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:26:40,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33297 2024-11-25T19:26:40,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33297 2024-11-25T19:26:40,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33297 2024-11-25T19:26:40,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33297 2024-11-25T19:26:40,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33297 2024-11-25T19:26:40,390 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:26:40,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:26:40,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:26:40,390 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:26:40,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:26:40,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:26:40,390 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:26:40,391 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:26:40,392 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35585 2024-11-25T19:26:40,393 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35585 connecting to ZooKeeper ensemble=127.0.0.1:63006 2024-11-25T19:26:40,394 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:40,395 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:40,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355850x0, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:26:40,399 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35585-0x1007859ab370001 connected 2024-11-25T19:26:40,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:26:40,399 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:26:40,400 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:26:40,400 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T19:26:40,401 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:26:40,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35585 2024-11-25T19:26:40,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35585 2024-11-25T19:26:40,405 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35585 2024-11-25T19:26:40,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35585 2024-11-25T19:26:40,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35585 2024-11-25T19:26:40,424 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ef6ccb75414:33297 2024-11-25T19:26:40,424 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:40,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:26:40,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:26:40,426 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:40,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T19:26:40,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,428 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:26:40,428 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ef6ccb75414,33297,1732562800346 from backup master directory 2024-11-25T19:26:40,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:40,429 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T19:26:40,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:26:40,429 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:40,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:26:40,433 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/hbase.id] with ID: 43d52222-7ab3-481c-b1bf-90ac8a501bfc 2024-11-25T19:26:40,433 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/.tmp/hbase.id 2024-11-25T19:26:40,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:26:40,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:26:40,439 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/.tmp/hbase.id]:[hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/hbase.id] 2024-11-25T19:26:40,451 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:40,451 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T19:26:40,452 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-25T19:26:40,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:26:40,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:26:40,465 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:26:40,466 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T19:26:40,466 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:26:40,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:26:40,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:26:40,475 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store 2024-11-25T19:26:40,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:26:40,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:26:40,483 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:26:40,483 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:26:40,483 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:26:40,483 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:26:40,483 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:26:40,483 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:26:40,483 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T19:26:40,483 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562800483Disabling compacts and flushes for region at 1732562800483Disabling writes for close at 1732562800483Writing region close event to WAL at 1732562800483Closed at 1732562800483 2024-11-25T19:26:40,484 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/.initializing 2024-11-25T19:26:40,484 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:40,487 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C33297%2C1732562800346, suffix=, logDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346, archiveDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/oldWALs, maxLogs=10 2024-11-25T19:26:40,487 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C33297%2C1732562800346.1732562800487 2024-11-25T19:26:40,492 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 2024-11-25T19:26:40,493 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46595:46595),(127.0.0.1/127.0.0.1:42355:42355)] 2024-11-25T19:26:40,496 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:26:40,497 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:26:40,497 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,497 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T19:26:40,501 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:40,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T19:26:40,503 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:26:40,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T19:26:40,505 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:26:40,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T19:26:40,507 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:26:40,507 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,508 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,508 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,510 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,510 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,510 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T19:26:40,511 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:26:40,514 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:26:40,514 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829977, jitterRate=0.05537132918834686}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T19:26:40,515 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732562800497Initializing all the Stores at 1732562800498 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562800498Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562800499 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562800499Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562800499Cleaning up temporary data from old regions at 1732562800510 (+11 ms)Region opened successfully at 1732562800515 (+5 ms) 2024-11-25T19:26:40,516 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T19:26:40,520 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15f5973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:26:40,521 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T19:26:40,521 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T19:26:40,521 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T19:26:40,521 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T19:26:40,521 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T19:26:40,522 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T19:26:40,522 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T19:26:40,526 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T19:26:40,527 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T19:26:40,528 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T19:26:40,528 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T19:26:40,529 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T19:26:40,529 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T19:26:40,530 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T19:26:40,531 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T19:26:40,531 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T19:26:40,532 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T19:26:40,533 DEBUG 
[master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T19:26:40,535 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T19:26:40,536 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T19:26:40,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:26:40,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:26:40,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,537 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ef6ccb75414,33297,1732562800346, sessionid=0x1007859ab370000, setting cluster-up flag (Was=false) 2024-11-25T19:26:40,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,542 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T19:26:40,542 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:40,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:40,547 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T19:26:40,548 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:40,549 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T19:26:40,550 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T19:26:40,551 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T19:26:40,551 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T19:26:40,551 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ef6ccb75414,33297,1732562800346 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ef6ccb75414:0, corePoolSize=10, maxPoolSize=10 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:26:40,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T19:26:40,558 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732562830558 2024-11-25T19:26:40,558 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T19:26:40,558 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T19:26:40,558 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T19:26:40,559 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T19:26:40,559 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T19:26:40,559 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:26:40,559 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T19:26:40,559 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T19:26:40,560 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,560 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T19:26:40,561 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,561 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T19:26:40,561 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T19:26:40,561 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T19:26:40,565 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T19:26:40,565 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T19:26:40,565 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562800565,5,FailOnTimeoutGroup] 2024-11-25T19:26:40,565 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562800565,5,FailOnTimeoutGroup] 2024-11-25T19:26:40,565 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T19:26:40,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-25T19:26:40,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:26:40,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:26:40,571 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T19:26:40,571 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef 2024-11-25T19:26:40,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:26:40,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:26:40,586 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:26:40,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:26:40,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:26:40,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:40,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:26:40,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:26:40,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:40,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:26:40,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:26:40,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:40,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:26:40,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:26:40,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:40,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:40,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:26:40,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740 2024-11-25T19:26:40,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740 2024-11-25T19:26:40,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:26:40,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:26:40,599 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
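The CompactionConfiguration(183) entries above echo the store-level compaction tuning this minicluster runs with: minCompactSize 128 MB, min/max files to compact 3/10, ratio 1.2, off-peak ratio 5.0. As a reference point, the sketch below shows the standard configuration keys behind those numbers; the class name and the hard-coded values are illustrative only (they mirror the defaults the log reports) and are not taken from this test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: the keys that feed the CompactionConfiguration values
// echoed in the log above. Values mirror the defaults the log reports.
public class CompactionConfigSketch {
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    return conf;
  }
}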
2024-11-25T19:26:40,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:26:40,603 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:26:40,603 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851544, jitterRate=0.08279520273208618}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:26:40,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732562800586Initializing all the Stores at 1732562800587 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562800587Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562800587Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562800587Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562800587Cleaning up temporary data from old regions at 1732562800599 (+12 ms)Region opened successfully at 1732562800604 (+5 ms) 2024-11-25T19:26:40,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:26:40,604 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:26:40,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:26:40,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:26:40,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:26:40,604 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:26:40,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562800604Disabling compacts and flushes for region at 1732562800604Disabling writes for close at 1732562800604Writing region close 
event to WAL at 1732562800604Closed at 1732562800604 2024-11-25T19:26:40,606 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:26:40,606 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T19:26:40,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T19:26:40,607 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:26:40,608 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T19:26:40,610 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(746): ClusterId : 43d52222-7ab3-481c-b1bf-90ac8a501bfc 2024-11-25T19:26:40,610 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:26:40,612 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:26:40,612 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:26:40,613 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:26:40,613 DEBUG [RS:0;6ef6ccb75414:35585 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bafed9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:26:40,630 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ef6ccb75414:35585 2024-11-25T19:26:40,630 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:26:40,630 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:26:40,630 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T19:26:40,631 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,33297,1732562800346 with port=35585, startcode=1732562800390 2024-11-25T19:26:40,631 DEBUG [RS:0;6ef6ccb75414:35585 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:26:40,633 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38223, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:26:40,633 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33297 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:40,633 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33297 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:40,635 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef 2024-11-25T19:26:40,635 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35789 2024-11-25T19:26:40,635 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:26:40,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:26:40,637 DEBUG [RS:0;6ef6ccb75414:35585 {}] zookeeper.ZKUtil(111): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:40,637 WARN [RS:0;6ef6ccb75414:35585 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:26:40,637 INFO [RS:0;6ef6ccb75414:35585 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:26:40,637 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:40,637 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,35585,1732562800390] 2024-11-25T19:26:40,640 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:26:40,642 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:26:40,642 INFO [RS:0;6ef6ccb75414:35585 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:26:40,642 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
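The MemStoreFlusher(131) line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M; the limit is a fraction of the region server heap, and the low mark a fraction of that limit. A minimal sketch of the two settings involved, assuming the usual keys and default fractions (0.4 and 0.95, consistent with 836/880), not values read from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: global memstore sizing behind the MemStoreFlusher log line.
public class MemStoreLimitSketch {
  public static Configuration memstoreDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);              // share of heap -> 880 M here
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // low mark as a share of the limit -> 836 M
    return conf;
  }
}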
2024-11-25T19:26:40,645 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:26:40,646 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T19:26:40,646 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,646 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,646 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,646 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,646 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,646 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,646 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:26:40,647 DEBUG [RS:0;6ef6ccb75414:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:26:40,649 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T19:26:40,649 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,649 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,649 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,649 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,649 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35585,1732562800390-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:26:40,665 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:26:40,665 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35585,1732562800390-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,665 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,665 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.Replication(171): 6ef6ccb75414,35585,1732562800390 started 2024-11-25T19:26:40,679 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:40,680 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,35585,1732562800390, RpcServer on 6ef6ccb75414/172.17.0.2:35585, sessionid=0x1007859ab370001 2024-11-25T19:26:40,680 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:26:40,680 DEBUG [RS:0;6ef6ccb75414:35585 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:40,680 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,35585,1732562800390' 2024-11-25T19:26:40,680 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:26:40,681 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:26:40,681 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:26:40,681 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:26:40,681 DEBUG [RS:0;6ef6ccb75414:35585 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:40,681 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,35585,1732562800390' 2024-11-25T19:26:40,681 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:26:40,681 DEBUG 
[RS:0;6ef6ccb75414:35585 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:26:40,682 DEBUG [RS:0;6ef6ccb75414:35585 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:26:40,682 INFO [RS:0;6ef6ccb75414:35585 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:26:40,682 INFO [RS:0;6ef6ccb75414:35585 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T19:26:40,759 WARN [6ef6ccb75414:33297 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T19:26:40,785 INFO [RS:0;6ef6ccb75414:35585 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C35585%2C1732562800390, suffix=, logDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390, archiveDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/oldWALs, maxLogs=32 2024-11-25T19:26:40,786 INFO [RS:0;6ef6ccb75414:35585 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:26:40,793 INFO [RS:0;6ef6ccb75414:35585 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:26:40,795 DEBUG [RS:0;6ef6ccb75414:35585 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42355:42355),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-25T19:26:41,009 DEBUG [6ef6ccb75414:33297 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T19:26:41,010 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:41,011 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,35585,1732562800390, state=OPENING 2024-11-25T19:26:41,013 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T19:26:41,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:41,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:26:41,015 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:26:41,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:26:41,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:26:41,015 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,35585,1732562800390}] 2024-11-25T19:26:41,170 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T19:26:41,174 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53517, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T19:26:41,178 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T19:26:41,179 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:26:41,181 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C35585%2C1732562800390.meta, suffix=.meta, logDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390, archiveDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/oldWALs, maxLogs=32 2024-11-25T19:26:41,182 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta 2024-11-25T19:26:41,190 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta 2024-11-25T19:26:41,197 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42355:42355),(127.0.0.1/127.0.0.1:46595:46595)] 2024-11-25T19:26:41,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:41,199 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:26:41,199 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T19:26:41,199 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T19:26:41,199 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
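The AbstractFSWAL(613) entries a few lines above print the WAL sizing used by this region server: blocksize=256 MB, rollsize=128 MB, maxLogs=32, where the roll size is the block size scaled by a roll multiplier. A minimal sketch of the corresponding settings, assuming the standard keys; the values simply restate what the log prints:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: WAL sizing echoed by AbstractFSWAL(613) above.
public class WalConfigSketch {
  public static Configuration walSizing() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * 0.5 = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
    return conf;
  }
}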
2024-11-25T19:26:41,199 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T19:26:41,200 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:26:41,200 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T19:26:41,200 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T19:26:41,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:26:41,202 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:26:41,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:41,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:41,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:26:41,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:26:41,204 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:41,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:41,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:26:41,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:26:41,205 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:41,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:41,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:41,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:26:41,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:26:41,207 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:41,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:26:41,207 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:26:41,208 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740 2024-11-25T19:26:41,209 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740 2024-11-25T19:26:41,210 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:26:41,210 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:26:41,211 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T19:26:41,212 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:26:41,213 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732031, jitterRate=-0.06917555630207062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:26:41,213 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T19:26:41,214 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732562801200Writing region info on filesystem at 1732562801200Initializing all the Stores at 1732562801201 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562801201Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562801201Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562801201Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562801201Cleaning up temporary data from old regions at 1732562801210 (+9 ms)Running coprocessor post-open hooks at 1732562801213 (+3 ms)Region opened successfully at 1732562801214 (+1 ms) 2024-11-25T19:26:41,215 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732562801169 2024-11-25T19:26:41,217 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T19:26:41,217 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T19:26:41,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:41,219 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,35585,1732562800390, state=OPEN 2024-11-25T19:26:41,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:26:41,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:26:41,221 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:41,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:26:41,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:26:41,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T19:26:41,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,35585,1732562800390 in 206 msec 2024-11-25T19:26:41,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T19:26:41,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 618 msec 2024-11-25T19:26:41,227 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:26:41,228 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T19:26:41,229 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:26:41,229 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,35585,1732562800390, seqNum=-1] 2024-11-25T19:26:41,229 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:26:41,230 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53749, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:26:41,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 685 msec 2024-11-25T19:26:41,236 INFO 
[master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732562801236, completionTime=-1 2024-11-25T19:26:41,236 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T19:26:41,236 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T19:26:41,238 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T19:26:41,238 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732562861238 2024-11-25T19:26:41,238 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732562921238 2024-11-25T19:26:41,238 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T19:26:41,238 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,33297,1732562800346-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:41,239 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,33297,1732562800346-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:41,239 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,33297,1732562800346-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:41,239 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ef6ccb75414:33297, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:41,239 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:41,239 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:41,241 DEBUG [master/6ef6ccb75414:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.814sec 2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,33297,1732562800346-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:26:41,243 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,33297,1732562800346-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T19:26:41,247 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T19:26:41,247 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T19:26:41,247 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,33297,1732562800346-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:26:41,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ac39a9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:26:41,311 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ef6ccb75414,33297,-1 for getting cluster id 2024-11-25T19:26:41,311 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T19:26:41,314 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '43d52222-7ab3-481c-b1bf-90ac8a501bfc' 2024-11-25T19:26:41,314 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T19:26:41,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "43d52222-7ab3-481c-b1bf-90ac8a501bfc" 2024-11-25T19:26:41,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@145c5c71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:26:41,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ef6ccb75414,33297,-1] 2024-11-25T19:26:41,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T19:26:41,316 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:26:41,318 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34208, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-25T19:26:41,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2999d795, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:26:41,319 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:26:41,321 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,35585,1732562800390, seqNum=-1] 2024-11-25T19:26:41,321 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:26:41,323 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57708, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:26:41,324 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:41,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:26:41,327 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T19:26:41,327 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-25T19:26:41,327 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-25T19:26:41,328 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T19:26:41,329 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 6ef6ccb75414,33297,1732562800346 2024-11-25T19:26:41,329 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@174170a3 2024-11-25T19:26:41,329 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T19:26:41,331 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34216, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T19:26:41,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33297 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T19:26:41,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33297 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
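The TableDescriptorChecker warnings above fire while the master handles the createTable request logged next: the run uses a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes, rolls, and splits happen quickly during testLogRollOnPipelineRestart. A hedged sketch of a test of this shape, assuming the HBaseTestingUtil and Admin APIs named in the log; the class below is hypothetical and is not the body of TestLogRolling:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Hypothetical sketch: lower the region sizing thresholds the warnings refer to,
// start a minicluster, and create the one-family table seen in the log.
public class LogRollTestSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // triggers the MAX_FILESIZE warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // triggers the MEMSTORE_FLUSHSIZE warning
    util.startMiniCluster();
    try {
      util.getAdmin().createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}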
2024-11-25T19:26:41,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33297 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:26:41,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33297 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T19:26:41,335 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T19:26:41,335 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:41,335 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33297 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-25T19:26:41,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33297 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:26:41,336 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T19:26:41,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741835_1011 (size=395) 2024-11-25T19:26:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741835_1011 (size=395) 2024-11-25T19:26:41,345 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c550580bf5c247fe7ffa5d0f194b423c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef 2024-11-25T19:26:41,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36655 is added to blk_1073741836_1012 (size=78) 2024-11-25T19:26:41,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38125 is added to blk_1073741836_1012 (size=78) 2024-11-25T19:26:41,354 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:26:41,354 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing c550580bf5c247fe7ffa5d0f194b423c, disabling compactions & flushes 2024-11-25T19:26:41,354 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:26:41,354 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:26:41,354 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. after waiting 0 ms 2024-11-25T19:26:41,354 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:26:41,354 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:26:41,354 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for c550580bf5c247fe7ffa5d0f194b423c: Waiting for close lock at 1732562801354Disabling compacts and flushes for region at 1732562801354Disabling writes for close at 1732562801354Writing region close event to WAL at 1732562801354Closed at 1732562801354 2024-11-25T19:26:41,356 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T19:26:41,356 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732562801356"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732562801356"}]},"ts":"1732562801356"} 2024-11-25T19:26:41,358 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-25T19:26:41,359 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T19:26:41,359 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562801359"}]},"ts":"1732562801359"} 2024-11-25T19:26:41,361 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-25T19:26:41,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c550580bf5c247fe7ffa5d0f194b423c, ASSIGN}] 2024-11-25T19:26:41,363 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c550580bf5c247fe7ffa5d0f194b423c, ASSIGN 2024-11-25T19:26:41,364 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c550580bf5c247fe7ffa5d0f194b423c, ASSIGN; state=OFFLINE, location=6ef6ccb75414,35585,1732562800390; forceNewPlan=false, retain=false 2024-11-25T19:26:41,515 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c550580bf5c247fe7ffa5d0f194b423c, regionState=OPENING, regionLocation=6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:41,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c550580bf5c247fe7ffa5d0f194b423c, ASSIGN because future has completed 2024-11-25T19:26:41,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c550580bf5c247fe7ffa5d0f194b423c, server=6ef6ccb75414,35585,1732562800390}] 2024-11-25T19:26:41,679 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 
2024-11-25T19:26:41,679 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c550580bf5c247fe7ffa5d0f194b423c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:26:41,680 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,680 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:26:41,680 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,680 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,682 INFO [StoreOpener-c550580bf5c247fe7ffa5d0f194b423c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,683 INFO [StoreOpener-c550580bf5c247fe7ffa5d0f194b423c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c550580bf5c247fe7ffa5d0f194b423c columnFamilyName info 2024-11-25T19:26:41,683 DEBUG [StoreOpener-c550580bf5c247fe7ffa5d0f194b423c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:26:41,684 INFO [StoreOpener-c550580bf5c247fe7ffa5d0f194b423c-1 {}] regionserver.HStore(327): Store=c550580bf5c247fe7ffa5d0f194b423c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:26:41,684 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,685 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,685 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,686 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,686 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,687 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,690 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:26:41,690 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c550580bf5c247fe7ffa5d0f194b423c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755445, jitterRate=-0.03940269351005554}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T19:26:41,691 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:26:41,691 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c550580bf5c247fe7ffa5d0f194b423c: Running coprocessor pre-open hook at 1732562801680Writing region info on filesystem at 1732562801680Initializing all the Stores at 1732562801681 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562801681Cleaning up temporary data from old regions at 1732562801686 (+5 ms)Running coprocessor post-open hooks at 1732562801691 (+5 ms)Region opened successfully at 1732562801691 2024-11-25T19:26:41,692 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c., pid=6, masterSystemTime=1732562801674 2024-11-25T19:26:41,695 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:26:41,695 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:26:41,695 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c550580bf5c247fe7ffa5d0f194b423c, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,35585,1732562800390 2024-11-25T19:26:41,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c550580bf5c247fe7ffa5d0f194b423c, server=6ef6ccb75414,35585,1732562800390 because future has completed 2024-11-25T19:26:41,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T19:26:41,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c550580bf5c247fe7ffa5d0f194b423c, server=6ef6ccb75414,35585,1732562800390 in 180 msec 2024-11-25T19:26:41,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T19:26:41,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c550580bf5c247fe7ffa5d0f194b423c, ASSIGN in 341 msec 2024-11-25T19:26:41,706 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T19:26:41,706 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562801706"}]},"ts":"1732562801706"} 2024-11-25T19:26:41,708 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-25T19:26:41,709 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T19:26:41,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 377 msec 2024-11-25T19:26:42,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:42,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:43,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:43,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:44,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:44,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:45,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:45,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:45,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:26:45,385 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T19:26:45,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T19:26:45,386 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-25T19:26:45,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:26:45,386 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T19:26:46,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:46,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:46,705 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:26:46,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:26:46,734 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T19:26:46,734 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-25T19:26:47,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:47,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:48,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:48,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:49,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:49,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:50,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:50,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:51,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:51,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33297 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:26:51,377 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-25T19:26:51,377 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-25T19:26:51,383 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T19:26:51,383 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:26:51,389 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c., hostname=6ef6ccb75414,35585,1732562800390, seqNum=2] 2024-11-25T19:26:52,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:52,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:53,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:53,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:53,393 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:26:53,393 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:53,393 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:53,394 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:38125,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:53,394 WARN [DataStreamer for file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta block BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38125,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK], DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38125,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]) is bad. 
2024-11-25T19:26:53,394 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:60150 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60150 dst: /127.0.0.1:38125 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,395 WARN [DataStreamer for file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 block BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38125,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK], DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38125,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]) is bad. 
2024-11-25T19:26:53,395 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:33460 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33460 dst: /127.0.0.1:36655 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,395 WARN [DataStreamer for file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 block BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK], DatanodeInfoWithStorage[127.0.0.1:38125,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38125,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]) is bad. 2024-11-25T19:26:53,395 WARN [PacketResponder: BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38125] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] 
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_149307764_22 at /127.0.0.1:33444 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33444 dst: /127.0.0.1:36655 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_149307764_22 at /127.0.0.1:60130 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60130 dst: /127.0.0.1:38125 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:60148 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60148 dst: /127.0.0.1:38125 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:33454 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33454 dst: /127.0.0.1:36655 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1cd5425{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:53,398 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e63449e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:53,398 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:53,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1974987b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:53,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14a79ae9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:53,399 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:53,399 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:26:53,399 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:53,399 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1990144314-172.17.0.2-1732562799754 (Datanode Uuid 9002a095-cc2f-4ce9-b0b1-e21f4c641699) service to localhost/127.0.0.1:35789 2024-11-25T19:26:53,400 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data3/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:53,400 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data4/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:53,400 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:53,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:53,416 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:53,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:53,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:53,417 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:26:53,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b2d1260{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:53,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cce6536{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:53,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a0e55c1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir/jetty-localhost-37409-hadoop-hdfs-3_4_1-tests_jar-_-any-3047787655690985911/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:53,514 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7aa94e72{HTTP/1.1, 
(http/1.1)}{localhost:37409} 2024-11-25T19:26:53,514 INFO [Time-limited test {}] server.Server(415): Started @164103ms 2024-11-25T19:26:53,516 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:53,530 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:53,530 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:53,530 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:53,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_149307764_22 at /127.0.0.1:36850 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36850 dst: /127.0.0.1:36655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:36836 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36836 dst: /127.0.0.1:36655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:53,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:36834 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36834 dst: /127.0.0.1:36655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:53,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cf0fa2a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:53,539 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fd8c23{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:53,539 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:53,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e97eedf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:53,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b810f17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:53,540 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:53,540 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T19:26:53,540 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1990144314-172.17.0.2-1732562799754 (Datanode Uuid 74458731-997c-4e21-af71-241618d82c62) service to localhost/127.0.0.1:35789 2024-11-25T19:26:53,540 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:53,540 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data1/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:53,541 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data2/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:53,541 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:53,552 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:53,555 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:53,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:53,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:53,556 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:26:53,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70d6804b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:53,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68a8c2cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:53,588 WARN [Thread-1330 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:26:53,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc848acb0d8a23bc2 with lease ID 0x93a34b58ead49f04: from storage DS-10949356-e571-40d5-89df-7c039cd88a0a node DatanodeRegistration(127.0.0.1:33061, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=39031, infoSecurePort=0, ipcPort=39421, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:26:53,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc848acb0d8a23bc2 with lease ID 0x93a34b58ead49f04: from storage DS-375c955e-1fd4-42f3-84ab-356c4e08868b node DatanodeRegistration(127.0.0.1:33061, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=39031, infoSecurePort=0, ipcPort=39421, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:53,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@211eec9a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir/jetty-localhost-36383-hadoop-hdfs-3_4_1-tests_jar-_-any-16878906623481946601/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:53,656 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3903d405{HTTP/1.1, (http/1.1)}{localhost:36383} 2024-11-25T19:26:53,656 INFO [Time-limited test {}] server.Server(415): Started @164244ms 2024-11-25T19:26:53,657 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T19:26:53,720 WARN [Thread-1361 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:26:53,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe77dd829924800a2 with lease ID 0x93a34b58ead49f05: from storage DS-f13b0552-e90a-4b76-906c-15ce447e345b node DatanodeRegistration(127.0.0.1:45037, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=42497, infoSecurePort=0, ipcPort=39183, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:26:53,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe77dd829924800a2 with lease ID 0x93a34b58ead49f05: from storage DS-0949797b-4da7-4541-8047-23d4b2e49c73 node DatanodeRegistration(127.0.0.1:45037, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=42497, infoSecurePort=0, ipcPort=39183, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:54,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:26:54,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:54,674 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-25T19:26:54,677 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-25T19:26:54,680 ERROR [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:54,680 WARN [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:54,680 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C35585%2C1732562800390:(num 1732562800785) roll requested 2024-11-25T19:26:54,681 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:26:54,688 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 newFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:26:54,688 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:54,688 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:54,688 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:54,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:54,689 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:26:54,689 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:26:54,689 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:26:54,689 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:54,689 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:26:54,690 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39031:39031),(127.0.0.1/127.0.0.1:42497:42497)] 2024-11-25T19:26:54,690 WARN [IPC Server handler 2 on default port 35789 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-25T19:26:54,690 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 is not closed yet, will try archiving it next time 2024-11-25T19:26:54,690 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 after 1ms 2024-11-25T19:26:55,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:55,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:26:55,591 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-25T19:26:56,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:56,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:56,694 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-25T19:26:57,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:57,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:58,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:58,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:26:58,691 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 after 4002ms 2024-11-25T19:26:58,698 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:45037,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:58,698 WARN [DataStreamer for file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 block BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33061,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK], DatanodeInfoWithStorage[127.0.0.1:45037,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45037,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]) is bad. 
2024-11-25T19:26:58,699 WARN [PacketResponder: BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45037] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:58,699 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:53546 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33061:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53546 dst: /127.0.0.1:33061 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:58,700 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:60264 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60264 dst: /127.0.0.1:45037 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:26:58,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@211eec9a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:58,702 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3903d405{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:58,702 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:58,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68a8c2cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:58,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70d6804b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:58,704 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:58,704 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T19:26:58,705 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1990144314-172.17.0.2-1732562799754 (Datanode Uuid 74458731-997c-4e21-af71-241618d82c62) service to localhost/127.0.0.1:35789 2024-11-25T19:26:58,705 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:58,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data1/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:58,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data2/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:58,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:58,716 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:58,720 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:58,720 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:58,720 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:58,721 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:26:58,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d61cf28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:58,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f1a012{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:58,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38f0c18c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir/jetty-localhost-32957-hadoop-hdfs-3_4_1-tests_jar-_-any-17983107623535440938/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:58,816 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dcfabbb{HTTP/1.1, (http/1.1)}{localhost:32957} 2024-11-25T19:26:58,816 INFO [Time-limited test {}] server.Server(415): Started @169404ms 2024-11-25T19:26:58,817 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:58,834 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:26:58,834 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1786850912_22 at /127.0.0.1:53572 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33061:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53572 dst: /127.0.0.1:33061 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:26:58,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a0e55c1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:58,839 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7aa94e72{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:26:58,839 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:26:58,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cce6536{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:26:58,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b2d1260{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,STOPPED} 2024-11-25T19:26:58,840 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:26:58,840 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:26:58,840 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1990144314-172.17.0.2-1732562799754 (Datanode Uuid 9002a095-cc2f-4ce9-b0b1-e21f4c641699) service to localhost/127.0.0.1:35789 2024-11-25T19:26:58,840 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:26:58,841 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data3/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:58,841 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data4/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:26:58,842 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:26:58,850 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:26:58,856 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:26:58,857 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:26:58,857 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:26:58,857 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:26:58,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7939cb3e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:26:58,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d802677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:26:58,911 WARN [Thread-1404 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:26:58,913 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18820b85819925a3 with lease ID 0x93a34b58ead49f06: from storage DS-f13b0552-e90a-4b76-906c-15ce447e345b node DatanodeRegistration(127.0.0.1:37371, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=36191, infoSecurePort=0, ipcPort=34959, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:58,913 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18820b85819925a3 with lease ID 0x93a34b58ead49f06: from storage DS-0949797b-4da7-4541-8047-23d4b2e49c73 node DatanodeRegistration(127.0.0.1:37371, datanodeUuid=74458731-997c-4e21-af71-241618d82c62, infoPort=36191, infoSecurePort=0, ipcPort=34959, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:58,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b4066bb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/java.io.tmpdir/jetty-localhost-33227-hadoop-hdfs-3_4_1-tests_jar-_-any-18137840520893629485/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:26:58,988 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21404da7{HTTP/1.1, (http/1.1)}{localhost:33227} 2024-11-25T19:26:58,988 INFO [Time-limited test {}] server.Server(415): Started @169576ms 2024-11-25T19:26:58,990 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:26:59,055 WARN [Thread-1435 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:26:59,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x62ea0acbd3912d8c with lease ID 0x93a34b58ead49f07: from storage DS-10949356-e571-40d5-89df-7c039cd88a0a node DatanodeRegistration(127.0.0.1:41079, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=39031, infoSecurePort=0, ipcPort=44295, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:26:59,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x62ea0acbd3912d8c with lease ID 0x93a34b58ead49f07: from storage DS-375c955e-1fd4-42f3-84ab-356c4e08868b node DatanodeRegistration(127.0.0.1:41079, datanodeUuid=9002a095-cc2f-4ce9-b0b1-e21f4c641699, infoPort=39031, infoSecurePort=0, ipcPort=44295, storageInfo=lv=-57;cid=testClusterID;nsid=2031381364;c=1732562799754), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:26:59,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:26:59,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:00,006 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-25T19:27:00,010 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-25T19:27:00,013 ERROR [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33061,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:27:00,013 WARN [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33061,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:00,013 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C35585%2C1732562800390:(num 1732562814680) roll requested 2024-11-25T19:27:00,014 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35585%2C1732562800390.1732562820014 2024-11-25T19:27:00,020 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 newFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 2024-11-25T19:27:00,020 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:00,020 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:00,020 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:00,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:00,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:00,021 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 2024-11-25T19:27:00,021 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33061,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:27:00,021 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33061,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:00,021 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:27:00,022 WARN [IPC Server handler 4 on default port 35789 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-25T19:27:00,022 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 after 1ms 2024-11-25T19:27:00,024 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36191:36191),(127.0.0.1/127.0.0.1:39031:39031)] 2024-11-25T19:27:00,024 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 is not closed yet, will try archiving it next time 2024-11-25T19:27:00,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:00,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:01,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:01,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:02,028 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:02,040 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 newFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:02,040 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:02,040 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:02,040 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:02,040 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:02,040 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:02,041 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:02,043 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39031:39031),(127.0.0.1/127.0.0.1:36191:36191)] 2024-11-25T19:27:02,043 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 is not closed yet, will try archiving it next time 2024-11-25T19:27:02,043 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 is not closed yet, will try archiving it next time 2024-11-25T19:27:02,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741838_1019 (size=1264) 2024-11-25T19:27:02,043 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741838_1019 (size=1264) 2024-11-25T19:27:02,043 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:27:02,043 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:27:02,044 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 after 1ms 2024-11-25T19:27:02,044 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:27:02,044 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 is not closed yet, will try archiving it next time 2024-11-25T19:27:02,055 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732562801691/Put/vlen=218/seqid=0] 2024-11-25T19:27:02,055 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732562811390/Put/vlen=1045/seqid=0] 2024-11-25T19:27:02,055 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562800785 2024-11-25T19:27:02,055 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:27:02,055 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:27:02,055 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 after 0ms 2024-11-25T19:27:02,055 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:27:02,059 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732562814679/Put/vlen=1045/seqid=0] 2024-11-25T19:27:02,059 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732562816695/Put/vlen=1045/seqid=0] 2024-11-25T19:27:02,059 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file 
/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 2024-11-25T19:27:02,059 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 2024-11-25T19:27:02,059 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 2024-11-25T19:27:02,059 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 after 0ms 2024-11-25T19:27:02,059 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562820014 2024-11-25T19:27:02,062 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732562820012/Put/vlen=1045/seqid=0] 2024-11-25T19:27:02,062 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:02,062 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:02,063 WARN [IPC Server handler 0 on default port 35789 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-25T19:27:02,063 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 after 1ms 2024-11-25T19:27:02,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:02,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:03,061 WARN [ResponseProcessor for block BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:03,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_149307764_22 at /127.0.0.1:41838 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41079:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41838 dst: /127.0.0.1:41079 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41079 remote=/127.0.0.1:41838]. Total timeout mills is 60000, 58979 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:27:03,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_149307764_22 at /127.0.0.1:36998 [Receiving block BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36998 dst: /127.0.0.1:37371 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:27:03,062 WARN [DataStreamer for file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 block BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK], DatanodeInfoWithStorage[127.0.0.1:37371,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-10949356-e571-40d5-89df-7c039cd88a0a,DISK]) is bad. 
2024-11-25T19:27:03,069 WARN [DataStreamer for file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 block BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:27:03,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741839_1022 (size=85) 2024-11-25T19:27:03,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741839_1022 (size=85) 2024-11-25T19:27:03,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:03,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:04,024 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562814680 after 4003ms 2024-11-25T19:27:04,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:04,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:04,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-25T19:27:05,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:05,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:06,064 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 after 4002ms 2024-11-25T19:27:06,064 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:06,073 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:06,074 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c550580bf5c247fe7ffa5d0f194b423c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-25T19:27:06,075 ERROR [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
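The repeated RecoverLeaseFSUtils(258) warnings above come from the lease-recovery helper reflectively invoking DistributedFileSystem.isFileClosed against a DFSClient that has already been shut down ("Filesystem closed"), while the appendAndSync failure shows the WAL block already UNDER_RECOVERY. A minimal sketch of the recover-then-poll pattern that helper implements, calling the HDFS client API directly instead of via reflection; the path, timeout, and class name here are assumptions for illustration, not taken from this run:

```java
// Illustrative sketch only: the recover-lease-then-poll pattern behind the
// "Recover lease on dfs file" / "Recovered lease, attempt=N" entries above.
// RecoverLeaseFSUtils calls isFileClosed reflectively; here the HDFS client
// API is called directly. Path and timeout are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  public static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
      throws Exception {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // local filesystems have no lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal); // attempt=0 often succeeds immediately
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);
      // If the DFSClient was closed underneath us, these calls throw
      // IOException("Filesystem closed"), as in the warnings above.
      recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path(args[0]); // e.g. an hdfs:// WAL path
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      System.out.println("lease recovered: " + recoverLease(fs, wal, 60_000L));
    }
  }
}
```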
2024-11-25T19:27:06,076 WARN [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
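The "append entry failed" warning above is followed in the next entry (19:27:06,077) by the region server's log roller requesting a fresh WAL. The same roll can also be requested explicitly through the client Admin API; a minimal sketch, assuming a reachable cluster and a placeholder ServerName (the test itself relies on its own roller thread):

```java
// Hedged sketch: explicitly rolling a region server's WAL, the operation the
// "roll requested" / "Rolled WAL" entries refer to. ServerName values are
// placeholders, not the server from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // host, port, startcode - placeholder values
      ServerName rs = ServerName.valueOf("regionserver.example.com", 16020, 1L);
      // Asks the region server to close its current WAL and open a new one.
      admin.rollWALWriter(rs);
    }
  }
}
```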
2024-11-25T19:27:06,077 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C35585%2C1732562800390:(num 1732562822028) roll requested 2024-11-25T19:27:06,077 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35585%2C1732562800390.1732562826077 2024-11-25T19:27:06,083 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 newFile=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562826077 2024-11-25T19:27:06,084 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,084 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,084 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,084 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,084 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,084 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562826077 2024-11-25T19:27:06,084 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:06,085 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1990144314-172.17.0.2-1732562799754:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:06,085 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:06,086 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36191:36191),(127.0.0.1/127.0.0.1:39031:39031)] 2024-11-25T19:27:06,086 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 is not closed yet, will try archiving it next time 2024-11-25T19:27:06,086 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 after 1ms 2024-11-25T19:27:06,086 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 to hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/oldWALs/6ef6ccb75414%2C35585%2C1732562800390.1732562822028 2024-11-25T19:27:06,100 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c/.tmp/info/a118455caaf84bbdad89912438cf1732 is 1080, key is row1002/info:/1732562811390/Put/seqid=0 2024-11-25T19:27:06,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741841_1024 (size=9270) 2024-11-25T19:27:06,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741841_1024 (size=9270) 2024-11-25T19:27:06,106 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c/.tmp/info/a118455caaf84bbdad89912438cf1732 2024-11-25T19:27:06,113 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c/.tmp/info/a118455caaf84bbdad89912438cf1732 as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c/info/a118455caaf84bbdad89912438cf1732 2024-11-25T19:27:06,118 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c/info/a118455caaf84bbdad89912438cf1732, entries=4, sequenceid=8, filesize=9.1 K 2024-11-25T19:27:06,120 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for c550580bf5c247fe7ffa5d0f194b423c in 46ms, sequenceid=8, compaction requested=false 2024-11-25T19:27:06,120 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c550580bf5c247fe7ffa5d0f194b423c: 2024-11-25T19:27:06,120 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-25T19:27:06,120 ERROR [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:06,120 WARN [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef-prefix:6ef6ccb75414,35585,1732562800390.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:06,120 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C35585%2C1732562800390.meta:.meta(num 1732562801181) roll requested 2024-11-25T19:27:06,121 INFO [regionserver/6ef6ccb75414:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35585%2C1732562800390.meta.1732562826121.meta 2024-11-25T19:27:06,126 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,126 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,126 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,126 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,126 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,126 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562826121.meta 2024-11-25T19:27:06,126 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:06,127 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
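The flush entries above ("Flushing c550580bf5c247fe7ffa5d0f194b423c 1/1 column families" through "Added ... entries=4, sequenceid=8, filesize=9.1 K") show the memstore for the test table being written out as an HFile and committed under the region's info family. A minimal sketch of triggering such a flush from a client, assuming a reachable cluster; only the table name is taken from this log:

```java
// Hedged sketch: requesting the memstore flush that produces the
// "Flushing ..." / "Added ... entries=N, sequenceid=M" entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Writes the memstore out as a new HFile under <region>/.tmp and commits it.
      admin.flush(table);
    }
  }
}
```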
2024-11-25T19:27:06,127 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta 2024-11-25T19:27:06,127 WARN [IPC Server handler 4 on default port 35789 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-25T19:27:06,127 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta after 0ms 2024-11-25T19:27:06,127 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36191:36191),(127.0.0.1/127.0.0.1:39031:39031)] 2024-11-25T19:27:06,128 DEBUG [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta is not closed yet, will try archiving it next time 2024-11-25T19:27:06,142 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/info/88d106d1374346ea950779c9e18f2072 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c./info:regioninfo/1732562801695/Put/seqid=0 2024-11-25T19:27:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741843_1027 (size=7125) 2024-11-25T19:27:06,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741843_1027 (size=7125) 2024-11-25T19:27:06,147 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/info/88d106d1374346ea950779c9e18f2072 2024-11-25T19:27:06,168 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/ns/6e8376cf58a641c98f3530caab6c2eb5 is 43, key is default/ns:d/1732562801231/Put/seqid=0 2024-11-25T19:27:06,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741844_1028 (size=5153) 2024-11-25T19:27:06,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741844_1028 (size=5153) 2024-11-25T19:27:06,173 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/ns/6e8376cf58a641c98f3530caab6c2eb5 2024-11-25T19:27:06,191 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/table/bb45688052344adbacccd935acfe95ab is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732562801706/Put/seqid=0 2024-11-25T19:27:06,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741845_1029 (size=5438) 2024-11-25T19:27:06,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741845_1029 (size=5438) 2024-11-25T19:27:06,196 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/table/bb45688052344adbacccd935acfe95ab 2024-11-25T19:27:06,202 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/info/88d106d1374346ea950779c9e18f2072 as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/info/88d106d1374346ea950779c9e18f2072 2024-11-25T19:27:06,207 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/info/88d106d1374346ea950779c9e18f2072, entries=10, sequenceid=11, filesize=7.0 K 2024-11-25T19:27:06,208 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/ns/6e8376cf58a641c98f3530caab6c2eb5 as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/ns/6e8376cf58a641c98f3530caab6c2eb5 2024-11-25T19:27:06,214 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/ns/6e8376cf58a641c98f3530caab6c2eb5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T19:27:06,215 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/.tmp/table/bb45688052344adbacccd935acfe95ab as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/table/bb45688052344adbacccd935acfe95ab 2024-11-25T19:27:06,221 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/table/bb45688052344adbacccd935acfe95ab, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T19:27:06,222 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-25T19:27:06,222 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T19:27:06,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:06,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T19:27:06,227 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
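At this point the test enters teardown: HBaseTestingUtil(1019) logs "Shutting down minicluster" and the shared AsyncConnection is closed from AbstractTestLogRolling.tearDown, which is why the remaining lease-recovery attempts against the old cluster keep failing with "Filesystem closed". A minimal sketch of that lifecycle, assuming JUnit 4 and a no-arg HBaseTestingUtil; the class and field names are placeholders, not the actual test source:

```java
// Hedged sketch of the teardown path the call stacks above run through
// (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Assumed no-arg construction; real tests may pass a Configuration.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // NameNode, DataNodes, master and region server(s)
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared connection and stops HBase, then HDFS - the
    // "Shutting down minicluster" / "Connection has been closed" entries above.
    testUtil.shutdownMiniCluster();
  }
}
```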
2024-11-25T19:27:06,227 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:27:06,227 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:27:06,227 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:27:06,227 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
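The call stack above records AsyncConnectionImpl.close() being reached from HBaseTestingUtil.closeConnection, after which the rpc client is stopped and the registry end-point refresher exits. A minimal sketch of opening and closing such a connection explicitly, assuming default client configuration:

```java
// Hedged sketch: the AsyncConnection lifecycle behind the
// "Connection has been closed" / "Stopping rpc client" entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class AsyncConnectionCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources invokes AsyncConnectionImpl.close(), which logs the
    // close message and stops the underlying rpc client.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      int tables = conn.getAdmin().listTableNames().get().size();
      System.out.println("visible tables: " + tables);
    }
  }
}
```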
2024-11-25T19:27:06,227 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T19:27:06,227 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=95841690, stopped=false 2024-11-25T19:27:06,228 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ef6ccb75414,33297,1732562800346 2024-11-25T19:27:06,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:06,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:27:06,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:27:06,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:06,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:06,229 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:27:06,229 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T19:27:06,229 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:27:06,229 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:27:06,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:27:06,229 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,35585,1732562800390' ***** 2024-11-25T19:27:06,229 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:27:06,230 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:27:06,230 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(3091): Received CLOSE for c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,35585,1732562800390 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ef6ccb75414:35585. 
2024-11-25T19:27:06,230 DEBUG [RS:0;6ef6ccb75414:35585 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:27:06,230 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c550580bf5c247fe7ffa5d0f194b423c, disabling compactions & flushes 2024-11-25T19:27:06,230 DEBUG [RS:0;6ef6ccb75414:35585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:27:06,230 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:27:06,230 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T19:27:06,230 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. after waiting 0 ms 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T19:27:06,230 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:27:06,230 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T19:27:06,231 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T19:27:06,231 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T19:27:06,231 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1325): Online Regions={c550580bf5c247fe7ffa5d0f194b423c=TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c., 1588230740=hbase:meta,,1.1588230740} 2024-11-25T19:27:06,231 DEBUG [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c550580bf5c247fe7ffa5d0f194b423c 2024-11-25T19:27:06,231 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:27:06,231 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:27:06,231 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:27:06,231 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:27:06,231 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:27:06,235 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/c550580bf5c247fe7ffa5d0f194b423c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-25T19:27:06,235 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T19:27:06,235 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 
2024-11-25T19:27:06,235 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:27:06,235 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c550580bf5c247fe7ffa5d0f194b423c: Waiting for close lock at 1732562826230Running coprocessor pre-close hooks at 1732562826230Disabling compacts and flushes for region at 1732562826230Disabling writes for close at 1732562826230Writing region close event to WAL at 1732562826231 (+1 ms)Running coprocessor post-close hooks at 1732562826235 (+4 ms)Closed at 1732562826235 2024-11-25T19:27:06,235 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:27:06,235 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562826231Running coprocessor pre-close hooks at 1732562826231Disabling compacts and flushes for region at 1732562826231Disabling writes for close at 1732562826231Writing region close event to WAL at 1732562826232 (+1 ms)Running coprocessor post-close hooks at 1732562826235 (+3 ms)Closed at 1732562826235 2024-11-25T19:27:06,235 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732562801331.c550580bf5c247fe7ffa5d0f194b423c. 2024-11-25T19:27:06,236 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T19:27:06,431 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,35585,1732562800390; all regions closed. 
2024-11-25T19:27:06,433 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,433 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,434 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,434 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:06,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741842_1025 (size=825) 2024-11-25T19:27:06,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741842_1025 (size=825) 2024-11-25T19:27:06,652 INFO [regionserver/6ef6ccb75414:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:27:06,715 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T19:27:06,715 INFO [regionserver/6ef6ccb75414:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T19:27:07,057 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-25T19:27:07,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:07,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:08,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:08,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:09,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:09,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:10,129 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta after 4002ms 2024-11-25T19:27:10,130 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/WALs/6ef6ccb75414,35585,1732562800390/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta to hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/oldWALs/6ef6ccb75414%2C35585%2C1732562800390.meta.1732562801181.meta 2024-11-25T19:27:10,138 DEBUG [RS:0;6ef6ccb75414:35585 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/oldWALs 2024-11-25T19:27:10,138 INFO [RS:0;6ef6ccb75414:35585 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C35585%2C1732562800390.meta:.meta(num 1732562826121) 2024-11-25T19:27:10,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,140 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,140 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741840_1023 (size=1162) 2024-11-25T19:27:10,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741840_1023 (size=1162) 2024-11-25T19:27:10,149 DEBUG [RS:0;6ef6ccb75414:35585 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/oldWALs 2024-11-25T19:27:10,149 INFO [RS:0;6ef6ccb75414:35585 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C35585%2C1732562800390:(num 1732562826077) 2024-11-25T19:27:10,149 DEBUG [RS:0;6ef6ccb75414:35585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:27:10,149 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:27:10,149 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:27:10,150 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T19:27:10,150 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:27:10,150 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T19:27:10,150 INFO [RS:0;6ef6ccb75414:35585 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35585 2024-11-25T19:27:10,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,35585,1732562800390 2024-11-25T19:27:10,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:27:10,153 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:27:10,154 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,35585,1732562800390] 2024-11-25T19:27:10,155 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,35585,1732562800390 already deleted, retry=false 2024-11-25T19:27:10,155 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,35585,1732562800390 expired; onlineServers=0 2024-11-25T19:27:10,155 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ef6ccb75414,33297,1732562800346' ***** 2024-11-25T19:27:10,155 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T19:27:10,155 INFO [M:0;6ef6ccb75414:33297 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:27:10,155 INFO [M:0;6ef6ccb75414:33297 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:27:10,156 DEBUG [M:0;6ef6ccb75414:33297 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T19:27:10,156 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T19:27:10,156 DEBUG [M:0;6ef6ccb75414:33297 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T19:27:10,156 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562800565 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562800565,5,FailOnTimeoutGroup] 2024-11-25T19:27:10,156 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562800565 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562800565,5,FailOnTimeoutGroup] 2024-11-25T19:27:10,156 INFO [M:0;6ef6ccb75414:33297 {}] hbase.ChoreService(370): Chore service for: master/6ef6ccb75414:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T19:27:10,156 INFO [M:0;6ef6ccb75414:33297 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:27:10,156 DEBUG [M:0;6ef6ccb75414:33297 {}] master.HMaster(1795): Stopping service threads 2024-11-25T19:27:10,156 INFO [M:0;6ef6ccb75414:33297 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T19:27:10,156 INFO [M:0;6ef6ccb75414:33297 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:27:10,157 INFO [M:0;6ef6ccb75414:33297 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T19:27:10,157 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T19:27:10,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T19:27:10,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:10,157 DEBUG [M:0;6ef6ccb75414:33297 {}] zookeeper.ZKUtil(347): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T19:27:10,157 WARN [M:0;6ef6ccb75414:33297 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T19:27:10,158 INFO [M:0;6ef6ccb75414:33297 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/.lastflushedseqids 2024-11-25T19:27:10,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741846_1030 (size=130) 2024-11-25T19:27:10,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741846_1030 (size=130) 2024-11-25T19:27:10,165 INFO [M:0;6ef6ccb75414:33297 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T19:27:10,165 INFO [M:0;6ef6ccb75414:33297 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T19:27:10,165 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:27:10,165 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:27:10,165 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:27:10,165 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:27:10,165 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:27:10,165 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-25T19:27:10,166 ERROR [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData-prefix:6ef6ccb75414,33297,1732562800346 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:10,166 WARN [FSHLog-0-hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData-prefix:6ef6ccb75414,33297,1732562800346 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:27:10,166 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 6ef6ccb75414%2C33297%2C1732562800346:(num 1732562800487) roll requested 2024-11-25T19:27:10,166 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C33297%2C1732562800346.1732562830166 2024-11-25T19:27:10,171 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,171 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,171 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,171 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,171 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,172 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562830166 2024-11-25T19:27:10,172 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T19:27:10,172 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36655,DS-f13b0552-e90a-4b76-906c-15ce447e345b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T19:27:10,172 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 2024-11-25T19:27:10,172 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39031:39031),(127.0.0.1/127.0.0.1:36191:36191)] 2024-11-25T19:27:10,172 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 is not closed yet, will try archiving it next time 2024-11-25T19:27:10,172 WARN [IPC Server handler 1 on default port 35789 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-25T19:27:10,173 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 after 1ms 2024-11-25T19:27:10,185 DEBUG [M:0;6ef6ccb75414:33297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ead9e10b8be442e5935a0081f420a21a is 82, key is hbase:meta,,1/info:regioninfo/1732562801218/Put/seqid=0 2024-11-25T19:27:10,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741848_1033 (size=5672) 2024-11-25T19:27:10,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741848_1033 (size=5672) 2024-11-25T19:27:10,191 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ead9e10b8be442e5935a0081f420a21a 2024-11-25T19:27:10,208 DEBUG [M:0;6ef6ccb75414:33297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ecb67ded1a1c4e488e6def94e7385589 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732562801710/Put/seqid=0 2024-11-25T19:27:10,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741849_1034 (size=6119) 2024-11-25T19:27:10,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741849_1034 (size=6119) 2024-11-25T19:27:10,213 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ecb67ded1a1c4e488e6def94e7385589 2024-11-25T19:27:10,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:10,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:10,230 DEBUG [M:0;6ef6ccb75414:33297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebff61c549374d27aefb72ea5b12cff5 is 69, key is 6ef6ccb75414,35585,1732562800390/rs:state/1732562800634/Put/seqid=0 2024-11-25T19:27:10,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741850_1035 (size=5156) 2024-11-25T19:27:10,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741850_1035 (size=5156) 2024-11-25T19:27:10,236 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebff61c549374d27aefb72ea5b12cff5 2024-11-25T19:27:10,253 DEBUG [M:0;6ef6ccb75414:33297 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9fc7aec55c534c94a5d580f38c6b2544 is 52, key is load_balancer_on/state:d/1732562801326/Put/seqid=0 2024-11-25T19:27:10,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:27:10,254 INFO [RS:0;6ef6ccb75414:35585 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:27:10,254 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x1007859ab370001, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:27:10,254 INFO [RS:0;6ef6ccb75414:35585 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,35585,1732562800390; zookeeper connection closed. 2024-11-25T19:27:10,255 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a6bb9ae {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a6bb9ae 2024-11-25T19:27:10,255 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T19:27:10,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741851_1036 (size=5056) 2024-11-25T19:27:10,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741851_1036 (size=5056) 2024-11-25T19:27:10,258 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9fc7aec55c534c94a5d580f38c6b2544 2024-11-25T19:27:10,263 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ead9e10b8be442e5935a0081f420a21a as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ead9e10b8be442e5935a0081f420a21a 2024-11-25T19:27:10,268 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ead9e10b8be442e5935a0081f420a21a, entries=8, sequenceid=56, filesize=5.5 K 2024-11-25T19:27:10,269 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ecb67ded1a1c4e488e6def94e7385589 as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ecb67ded1a1c4e488e6def94e7385589 2024-11-25T19:27:10,275 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ecb67ded1a1c4e488e6def94e7385589, entries=6, sequenceid=56, filesize=6.0 K 2024-11-25T19:27:10,276 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebff61c549374d27aefb72ea5b12cff5 as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ebff61c549374d27aefb72ea5b12cff5 2024-11-25T19:27:10,281 INFO 
[M:0;6ef6ccb75414:33297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ebff61c549374d27aefb72ea5b12cff5, entries=1, sequenceid=56, filesize=5.0 K 2024-11-25T19:27:10,282 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9fc7aec55c534c94a5d580f38c6b2544 as hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9fc7aec55c534c94a5d580f38c6b2544 2024-11-25T19:27:10,289 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9fc7aec55c534c94a5d580f38c6b2544, entries=1, sequenceid=56, filesize=4.9 K 2024-11-25T19:27:10,290 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false 2024-11-25T19:27:10,291 INFO [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:27:10,292 DEBUG [M:0;6ef6ccb75414:33297 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562830165Disabling compacts and flushes for region at 1732562830165Disabling writes for close at 1732562830165Obtaining lock to block concurrent updates at 1732562830165Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732562830165Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732562830166 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732562830173 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732562830173Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732562830185 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732562830185Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732562830195 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732562830207 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732562830207Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732562830218 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732562830230 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732562830230Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732562830240 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732562830252 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732562830252Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3014cc62: reopening flushed file at 1732562830262 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dc195ee: reopening flushed file at 1732562830268 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48ccbed0: reopening flushed file at 1732562830275 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6630cc34: reopening flushed file at 1732562830282 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false at 1732562830290 (+8 ms)Writing region close event to WAL at 1732562830291 (+1 ms)Closed at 1732562830291 2024-11-25T19:27:10,292 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,292 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,292 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,292 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,292 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:10,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37371 is added to blk_1073741847_1031 (size=757) 2024-11-25T19:27:10,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741847_1031 (size=757) 2024-11-25T19:27:10,326 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T19:27:11,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:11,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:11,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,771 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:27:11,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:11,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:12,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:12,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:13,058 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-25T19:27:13,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:13,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:14,174 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 after 4002ms 2024-11-25T19:27:14,175 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/WALs/6ef6ccb75414,33297,1732562800346/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 to hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/oldWALs/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 2024-11-25T19:27:14,184 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/MasterData/oldWALs/6ef6ccb75414%2C33297%2C1732562800346.1732562800487 to hdfs://localhost:35789/user/jenkins/test-data/b37cc52f-1dca-ed73-f8a2-cbec01eca7ef/oldWALs/6ef6ccb75414%2C33297%2C1732562800346.1732562800487$masterlocalwal$ 2024-11-25T19:27:14,185 INFO [M:0;6ef6ccb75414:33297 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T19:27:14,185 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:27:14,185 INFO [M:0;6ef6ccb75414:33297 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33297 2024-11-25T19:27:14,185 INFO [M:0;6ef6ccb75414:33297 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:27:14,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:14,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:14,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:27:14,287 INFO [M:0;6ef6ccb75414:33297 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:27:14,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33297-0x1007859ab370000, quorum=127.0.0.1:63006, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:27:14,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b4066bb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:27:14,292 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21404da7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:27:14,292 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:27:14,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d802677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:27:14,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7939cb3e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,STOPPED} 2024-11-25T19:27:14,295 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:27:14,295 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:27:14,295 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:27:14,295 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1990144314-172.17.0.2-1732562799754 (Datanode Uuid 9002a095-cc2f-4ce9-b0b1-e21f4c641699) service to localhost/127.0.0.1:35789 2024-11-25T19:27:14,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data3/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:27:14,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data4/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:27:14,296 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:27:14,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38f0c18c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:27:14,300 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dcfabbb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:27:14,300 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:27:14,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f1a012{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:27:14,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d61cf28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,STOPPED} 2024-11-25T19:27:14,302 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:27:14,302 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:27:14,302 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:27:14,302 WARN [BP-1990144314-172.17.0.2-1732562799754 heartbeating to localhost/127.0.0.1:35789 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1990144314-172.17.0.2-1732562799754 (Datanode Uuid 74458731-997c-4e21-af71-241618d82c62) service to localhost/127.0.0.1:35789 2024-11-25T19:27:14,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data1/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:27:14,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/cluster_e7685679-4b75-9f40-a868-f670a5ae705c/data/data2/current/BP-1990144314-172.17.0.2-1732562799754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:27:14,303 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:27:14,309 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d54b888{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:27:14,310 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24710539{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:27:14,310 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:27:14,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38a1581{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:27:14,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c5497db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir/,STOPPED} 2024-11-25T19:27:14,316 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T19:27:14,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T19:27:14,342 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 153) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35789 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:35789 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35789 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35789 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35789 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35789 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35789 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35789 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=139 (was 122) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5116 (was 5706) 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=139, ProcessCount=11, AvailableMemoryMB=5116 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.log.dir so I do NOT create it in target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f76a5c3-6e4f-7257-10a0-92da2ce9cdac/hadoop.tmp.dir so I do NOT create it in target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43, deleteOnExit=true 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/test.cache.data in system properties and HBase conf 2024-11-25T19:27:14,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T19:27:14,350 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T19:27:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:27:14,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:27:14,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T19:27:14,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/nfs.dump.dir in system properties and HBase conf 2024-11-25T19:27:14,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/java.io.tmpdir in system properties and HBase conf 2024-11-25T19:27:14,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:27:14,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T19:27:14,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T19:27:14,364 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:27:14,409 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:27:14,413 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:27:14,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:27:14,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:27:14,414 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:27:14,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:27:14,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30eae670{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:27:14,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35b0b5b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:27:14,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e0da52c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/java.io.tmpdir/jetty-localhost-44777-hadoop-hdfs-3_4_1-tests_jar-_-any-15284098019772064111/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:27:14,508 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@637fc016{HTTP/1.1, (http/1.1)}{localhost:44777} 2024-11-25T19:27:14,508 INFO [Time-limited test {}] server.Server(415): Started @185096ms 2024-11-25T19:27:14,519 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:27:14,558 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:27:14,562 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:27:14,563 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:27:14,563 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:27:14,563 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:27:14,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38f5461{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:27:14,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30c71845{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:27:14,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a560185{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/java.io.tmpdir/jetty-localhost-45173-hadoop-hdfs-3_4_1-tests_jar-_-any-13218038188079793560/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:27:14,658 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1850586f{HTTP/1.1, (http/1.1)}{localhost:45173} 2024-11-25T19:27:14,658 INFO [Time-limited test {}] server.Server(415): Started @185246ms 2024-11-25T19:27:14,659 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:27:14,686 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:27:14,689 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:27:14,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:27:14,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:27:14,690 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:27:14,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40c8737d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:27:14,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6da95783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:27:14,716 WARN [Thread-1629 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data1/current/BP-1570157930-172.17.0.2-1732562834373/current, will proceed with Du for space computation calculation, 2024-11-25T19:27:14,716 WARN [Thread-1630 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data2/current/BP-1570157930-172.17.0.2-1732562834373/current, will proceed with Du for space computation calculation, 2024-11-25T19:27:14,730 WARN [Thread-1608 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:27:14,732 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa851f2c0a7f1845e with lease ID 0xf3083a6cc7f6a067: Processing first storage report for DS-605938d1-a5d8-434b-ba41-04362a26f489 from datanode DatanodeRegistration(127.0.0.1:45807, datanodeUuid=99742ee7-a7b7-4c5f-98e2-68bae2abba81, infoPort=33047, infoSecurePort=0, ipcPort=42675, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373) 2024-11-25T19:27:14,732 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa851f2c0a7f1845e with lease ID 0xf3083a6cc7f6a067: from storage DS-605938d1-a5d8-434b-ba41-04362a26f489 node DatanodeRegistration(127.0.0.1:45807, datanodeUuid=99742ee7-a7b7-4c5f-98e2-68bae2abba81, infoPort=33047, infoSecurePort=0, ipcPort=42675, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:27:14,732 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa851f2c0a7f1845e with lease ID 0xf3083a6cc7f6a067: Processing first storage report for DS-40771b07-a943-43f4-b1b7-2f3422356817 from datanode DatanodeRegistration(127.0.0.1:45807, datanodeUuid=99742ee7-a7b7-4c5f-98e2-68bae2abba81, infoPort=33047, infoSecurePort=0, ipcPort=42675, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373) 2024-11-25T19:27:14,732 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa851f2c0a7f1845e with lease ID 0xf3083a6cc7f6a067: from storage DS-40771b07-a943-43f4-b1b7-2f3422356817 node DatanodeRegistration(127.0.0.1:45807, datanodeUuid=99742ee7-a7b7-4c5f-98e2-68bae2abba81, infoPort=33047, infoSecurePort=0, ipcPort=42675, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:27:14,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c243e85{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/java.io.tmpdir/jetty-localhost-45541-hadoop-hdfs-3_4_1-tests_jar-_-any-3982463590545806514/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:27:14,786 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@231eb320{HTTP/1.1, (http/1.1)}{localhost:45541} 2024-11-25T19:27:14,786 INFO [Time-limited test {}] server.Server(415): Started @185374ms 2024-11-25T19:27:14,787 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
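[Illustrative sketch, not part of the captured log.] The startup traced above (mini DFS, two DataNodes, Jetty endpoints) is what HBaseTestingUtil drives when a test requests a mini cluster with the logged StartMiniClusterOption. A minimal sketch of that call follows, assuming the HBaseTestingUtil / StartMiniClusterOption names referenced in the log; exact class names and signatures differ between HBase branches.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper and HBase as traced above
    try {
      // test logic would run here, e.g. against util.getConnection()
    } finally {
      util.shutdownMiniCluster();    // tears the cluster down and removes the test dirs
    }
  }
}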
2024-11-25T19:27:14,840 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data4/current/BP-1570157930-172.17.0.2-1732562834373/current, will proceed with Du for space computation calculation, 2024-11-25T19:27:14,840 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data3/current/BP-1570157930-172.17.0.2-1732562834373/current, will proceed with Du for space computation calculation, 2024-11-25T19:27:14,855 WARN [Thread-1644 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:27:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc440258e42bd07fa with lease ID 0xf3083a6cc7f6a068: Processing first storage report for DS-5489f42c-607f-4e47-b22f-bc1b9c9d79ec from datanode DatanodeRegistration(127.0.0.1:45043, datanodeUuid=47e8db2c-70db-4c41-a903-6d1e1ad16475, infoPort=34339, infoSecurePort=0, ipcPort=34853, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373) 2024-11-25T19:27:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc440258e42bd07fa with lease ID 0xf3083a6cc7f6a068: from storage DS-5489f42c-607f-4e47-b22f-bc1b9c9d79ec node DatanodeRegistration(127.0.0.1:45043, datanodeUuid=47e8db2c-70db-4c41-a903-6d1e1ad16475, infoPort=34339, infoSecurePort=0, ipcPort=34853, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:27:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc440258e42bd07fa with lease ID 0xf3083a6cc7f6a068: Processing first storage report for DS-16f14446-3a01-4af3-9b46-66c1e38ed69f from datanode DatanodeRegistration(127.0.0.1:45043, datanodeUuid=47e8db2c-70db-4c41-a903-6d1e1ad16475, infoPort=34339, infoSecurePort=0, ipcPort=34853, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373) 2024-11-25T19:27:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc440258e42bd07fa with lease ID 0xf3083a6cc7f6a068: from storage DS-16f14446-3a01-4af3-9b46-66c1e38ed69f node DatanodeRegistration(127.0.0.1:45043, datanodeUuid=47e8db2c-70db-4c41-a903-6d1e1ad16475, infoPort=34339, infoSecurePort=0, ipcPort=34853, storageInfo=lv=-57;cid=testClusterID;nsid=1910325846;c=1732562834373), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:27:14,911 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288 2024-11-25T19:27:14,915 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/zookeeper_0, clientPort=50449, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T19:27:14,916 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50449 2024-11-25T19:27:14,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:14,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:14,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:27:14,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:27:14,929 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158 with version=8 2024-11-25T19:27:14,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase-staging 2024-11-25T19:27:14,931 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:27:14,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:27:14,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:27:14,931 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:27:14,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:27:14,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:27:14,931 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T19:27:14,931 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:27:14,932 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44919 2024-11-25T19:27:14,933 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44919 connecting to ZooKeeper ensemble=127.0.0.1:50449 2024-11-25T19:27:14,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:449190x0, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:27:14,937 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44919-0x100785a32500000 connected 2024-11-25T19:27:14,948 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:14,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:14,952 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:27:14,953 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158, hbase.cluster.distributed=false 2024-11-25T19:27:14,955 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:27:14,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44919 2024-11-25T19:27:14,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44919 2024-11-25T19:27:14,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44919 2024-11-25T19:27:14,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44919 2024-11-25T19:27:14,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44919 2024-11-25T19:27:14,976 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:27:14,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:27:14,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:27:14,976 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:27:14,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:27:14,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:27:14,976 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:27:14,976 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:27:14,977 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45469 2024-11-25T19:27:14,979 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45469 connecting to ZooKeeper ensemble=127.0.0.1:50449 2024-11-25T19:27:14,979 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:14,981 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:14,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454690x0, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:27:14,985 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45469-0x100785a32500001 connected 2024-11-25T19:27:14,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:27:14,985 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:27:14,986 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:27:14,986 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T19:27:14,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:27:14,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45469 2024-11-25T19:27:14,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45469 2024-11-25T19:27:14,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45469 2024-11-25T19:27:14,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45469 2024-11-25T19:27:14,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45469 
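[Illustrative sketch, not part of the captured log.] The master and region server above both bind Netty RPC endpoints and register with the mini ZooKeeper ensemble at 127.0.0.1:50449. A client reaches such a cluster through that ensemble; the sketch below uses the standard hbase-client Connection API, with the quorum and client port values taken from this run's log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Point the client at the mini ZooKeeper ensemble the servers registered with.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 50449);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Admin and table operations would go through this connection.
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}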
2024-11-25T19:27:15,001 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ef6ccb75414:44919 2024-11-25T19:27:15,001 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:27:15,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:27:15,003 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T19:27:15,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,004 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:27:15,005 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ef6ccb75414,44919,1732562834930 from backup master directory 2024-11-25T19:27:15,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:27:15,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:27:15,005 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
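[Illustrative sketch, not part of the captured log.] The watcher events above revolve around the /hbase/master and /hbase/backup-masters znodes the master touches while becoming active. The sketch below inspects those znodes with a plain ZooKeeper client, assuming the ensemble address from the log and an insecure (open-ACL) mini cluster; the /hbase/master data is a protobuf-serialized ServerName, so it is only reported as raw bytes here.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ZNodeInspectionSketch {
  public static void main(String[] args) throws Exception {
    // One-shot read against the mini ensemble; the no-op lambda is the session watcher.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50449", 30000, event -> { });
    try {
      List<String> backupMasters = zk.getChildren("/hbase/backup-masters", false);
      System.out.println("backup masters: " + backupMasters);
      byte[] master = zk.getData("/hbase/master", false, null);
      System.out.println("/hbase/master holds " + master.length + " bytes (protobuf ServerName)");
    } finally {
      zk.close();
    }
  }
}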
2024-11-25T19:27:15,005 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/hbase.id] with ID: 287745b7-0fda-4591-8a7b-7082d55b2d1f 2024-11-25T19:27:15,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/.tmp/hbase.id 2024-11-25T19:27:15,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:27:15,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:27:15,016 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/.tmp/hbase.id]:[hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/hbase.id] 2024-11-25T19:27:15,026 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:15,026 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T19:27:15,027 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
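[Illustrative sketch, not part of the captured log.] The entries above record the cluster ID (hbase.id) being written to a temporary path and then moved into the HBase root directory on the mini DFS, shortly after the version marker (conventionally hbase.version) was created there. A sketch of checking those files from the test side with the plain Hadoop FileSystem API, using the rootdir and NameNode port logged for this particular run:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address and rootdir as logged by FSUtils for this run.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44277"), conf);
    Path rootDir = new Path("/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158");
    System.out.println("hbase.version exists: " + fs.exists(new Path(rootDir, "hbase.version")));
    System.out.println("hbase.id exists:      " + fs.exists(new Path(rootDir, "hbase.id")));
  }
}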
2024-11-25T19:27:15,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:27:15,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:27:15,038 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:27:15,039 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T19:27:15,039 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:27:15,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:27:15,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:27:15,047 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store 2024-11-25T19:27:15,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:27:15,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:27:15,054 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:27:15,054 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:27:15,054 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:27:15,054 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:27:15,054 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:27:15,055 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:27:15,055 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
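[Illustrative sketch, not part of the captured log.] The 'master:store' descriptor dumped above (families info, proc, rs, state) is the same shape the public descriptor builders express. The sketch below rebuilds an equivalent 'info' family from its logged attributes (VERSIONS=3, IN_MEMORY=true, BLOCKSIZE=8192, ROW_INDEX_V1 encoding, ROWCOL bloom) using the hbase-client builder API; it is a schema illustration only, not how the master actually constructs its local store.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreSchemaSketch {
  public static void main(String[] args) {
    // Rebuild the 'info' family from the attributes printed in the log.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8192)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .build();
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
    System.out.println(store);
  }
}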
2024-11-25T19:27:15,055 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562835054Disabling compacts and flushes for region at 1732562835054Disabling writes for close at 1732562835055 (+1 ms)Writing region close event to WAL at 1732562835055Closed at 1732562835055 2024-11-25T19:27:15,055 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/.initializing 2024-11-25T19:27:15,056 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/WALs/6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,058 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C44919%2C1732562834930, suffix=, logDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/WALs/6ef6ccb75414,44919,1732562834930, archiveDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/oldWALs, maxLogs=10 2024-11-25T19:27:15,059 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C44919%2C1732562834930.1732562835058 2024-11-25T19:27:15,063 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/WALs/6ef6ccb75414,44919,1732562834930/6ef6ccb75414%2C44919%2C1732562834930.1732562835058 2024-11-25T19:27:15,064 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34339:34339),(127.0.0.1/127.0.0.1:33047:33047)] 2024-11-25T19:27:15,065 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:27:15,065 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:27:15,065 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,065 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T19:27:15,069 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T19:27:15,071 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:27:15,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T19:27:15,072 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:27:15,073 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T19:27:15,074 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:27:15,075 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,075 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,076 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,077 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,077 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,078 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T19:27:15,079 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:27:15,081 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:27:15,082 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881880, jitterRate=0.12136872112751007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T19:27:15,082 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732562835065Initializing all the Stores at 1732562835066 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562835066Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562835067 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562835067Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562835067Cleaning up temporary data from old regions at 1732562835077 (+10 ms)Region opened successfully at 1732562835082 (+5 ms) 2024-11-25T19:27:15,083 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T19:27:15,085 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62910cc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:27:15,086 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T19:27:15,086 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T19:27:15,087 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T19:27:15,087 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T19:27:15,087 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T19:27:15,088 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T19:27:15,088 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T19:27:15,090 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T19:27:15,091 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T19:27:15,091 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T19:27:15,092 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T19:27:15,092 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T19:27:15,093 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T19:27:15,093 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T19:27:15,094 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T19:27:15,095 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T19:27:15,096 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T19:27:15,096 DEBUG 
[master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T19:27:15,098 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T19:27:15,098 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T19:27:15,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:27:15,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:27:15,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,100 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ef6ccb75414,44919,1732562834930, sessionid=0x100785a32500000, setting cluster-up flag (Was=false) 2024-11-25T19:27:15,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,104 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T19:27:15,105 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,109 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T19:27:15,110 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,111 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T19:27:15,113 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T19:27:15,113 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T19:27:15,113 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T19:27:15,113 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ef6ccb75414,44919,1732562834930 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ef6ccb75414:0, corePoolSize=10, maxPoolSize=10 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:27:15,115 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732562865116 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T19:27:15,116 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,117 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:27:15,117 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T19:27:15,117 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T19:27:15,117 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T19:27:15,117 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T19:27:15,117 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T19:27:15,117 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T19:27:15,117 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562835117,5,FailOnTimeoutGroup] 2024-11-25T19:27:15,117 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562835117,5,FailOnTimeoutGroup] 2024-11-25T19:27:15,117 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,118 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T19:27:15,118 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,118 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,118 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,118 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T19:27:15,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:27:15,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:27:15,125 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T19:27:15,126 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158 2024-11-25T19:27:15,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:27:15,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:27:15,133 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:27:15,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:27:15,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:27:15,135 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:27:15,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:27:15,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:27:15,138 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:27:15,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:27:15,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:27:15,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:27:15,141 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740 2024-11-25T19:27:15,141 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740 2024-11-25T19:27:15,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:27:15,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:27:15,143 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T19:27:15,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:27:15,146 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:27:15,147 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852635, jitterRate=0.08418199419975281}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:27:15,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732562835133Initializing all the Stores at 1732562835134 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562835134Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562835134Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562835134Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562835134Cleaning up temporary data from old regions at 1732562835142 (+8 ms)Region opened successfully at 1732562835147 (+5 ms) 2024-11-25T19:27:15,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:27:15,147 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:27:15,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:27:15,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:27:15,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:27:15,148 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:27:15,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562835147Disabling compacts and flushes for region at 1732562835147Disabling writes for close at 1732562835147Writing region close 
event to WAL at 1732562835148 (+1 ms)Closed at 1732562835148 2024-11-25T19:27:15,149 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:27:15,149 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T19:27:15,150 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T19:27:15,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:27:15,152 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T19:27:15,195 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(746): ClusterId : 287745b7-0fda-4591-8a7b-7082d55b2d1f 2024-11-25T19:27:15,195 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:27:15,199 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:27:15,199 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:27:15,204 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:27:15,204 DEBUG [RS:0;6ef6ccb75414:45469 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a480ca4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:27:15,220 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ef6ccb75414:45469 2024-11-25T19:27:15,220 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:27:15,220 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:27:15,220 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T19:27:15,221 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,44919,1732562834930 with port=45469, startcode=1732562834976 2024-11-25T19:27:15,221 DEBUG [RS:0;6ef6ccb75414:45469 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:27:15,223 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47005, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:27:15,223 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44919 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,223 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44919 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,224 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158 2024-11-25T19:27:15,225 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44277 2024-11-25T19:27:15,225 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:27:15,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:27:15,226 DEBUG [RS:0;6ef6ccb75414:45469 {}] zookeeper.ZKUtil(111): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,226 WARN [RS:0;6ef6ccb75414:45469 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:27:15,226 INFO [RS:0;6ef6ccb75414:45469 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:27:15,226 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,226 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,45469,1732562834976] 2024-11-25T19:27:15,230 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:27:15,233 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:27:15,233 INFO [RS:0;6ef6ccb75414:45469 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:27:15,233 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T19:27:15,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T19:27:15,233 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:27:15,234 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T19:27:15,234 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-25T19:27:15,234 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,234 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,234 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:27:15,235 DEBUG [RS:0;6ef6ccb75414:45469 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3
2024-11-25T19:27:15,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T19:27:15,236 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,236 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,236 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,236 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,236 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,236 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,45469,1732562834976-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:27:15,249 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:27:15,250 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,45469,1732562834976-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,250 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-25T19:27:15,250 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.Replication(171): 6ef6ccb75414,45469,1732562834976 started 2024-11-25T19:27:15,262 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,262 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,45469,1732562834976, RpcServer on 6ef6ccb75414/172.17.0.2:45469, sessionid=0x100785a32500001 2024-11-25T19:27:15,262 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:27:15,262 DEBUG [RS:0;6ef6ccb75414:45469 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,262 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,45469,1732562834976' 2024-11-25T19:27:15,262 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:27:15,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:27:15,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:27:15,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:27:15,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,45469,1732562834976' 2024-11-25T19:27:15,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:27:15,264 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:27:15,264 DEBUG [RS:0;6ef6ccb75414:45469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:27:15,264 INFO [RS:0;6ef6ccb75414:45469 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:27:15,264 INFO [RS:0;6ef6ccb75414:45469 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T19:27:15,303 WARN [6ef6ccb75414:44919 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-25T19:27:15,367 INFO [RS:0;6ef6ccb75414:45469 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C45469%2C1732562834976, suffix=, logDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976, archiveDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/oldWALs, maxLogs=32 2024-11-25T19:27:15,368 INFO [RS:0;6ef6ccb75414:45469 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C45469%2C1732562834976.1732562835368 2024-11-25T19:27:15,378 INFO [RS:0;6ef6ccb75414:45469 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562835368 2024-11-25T19:27:15,379 DEBUG [RS:0;6ef6ccb75414:45469 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33047:33047),(127.0.0.1/127.0.0.1:34339:34339)] 2024-11-25T19:27:15,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:27:15,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:27:15,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T19:27:15,553 DEBUG [6ef6ccb75414:44919 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T19:27:15,554 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,558 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,45469,1732562834976, state=OPENING 2024-11-25T19:27:15,560 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T19:27:15,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:27:15,563 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:27:15,563 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:27:15,563 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:27:15,564 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,45469,1732562834976}] 2024-11-25T19:27:15,718 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T19:27:15,722 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56385, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T19:27:15,730 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T19:27:15,730 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:27:15,733 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C45469%2C1732562834976.meta, suffix=.meta, logDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976, archiveDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/oldWALs, maxLogs=32 2024-11-25T19:27:15,733 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C45469%2C1732562834976.meta.1732562835733.meta 2024-11-25T19:27:15,743 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.meta.1732562835733.meta 2024-11-25T19:27:15,745 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34339:34339),(127.0.0.1/127.0.0.1:33047:33047)] 2024-11-25T19:27:15,745 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:27:15,746 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T19:27:15,746 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T19:27:15,746 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T19:27:15,746 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T19:27:15,746 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:27:15,746 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T19:27:15,746 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T19:27:15,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:27:15,748 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:27:15,748 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:27:15,749 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:27:15,750 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:27:15,751 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:27:15,751 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:27:15,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:27:15,752 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:27:15,752 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T19:27:15,753 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:27:15,753 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740 2024-11-25T19:27:15,754 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740 2024-11-25T19:27:15,756 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:27:15,756 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:27:15,756 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T19:27:15,758 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:27:15,759 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882227, jitterRate=0.1218099594116211}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:27:15,759 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T19:27:15,760 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732562835746Writing region info on filesystem at 1732562835746Initializing all the Stores at 1732562835747 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562835747Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562835747Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562835747Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562835747Cleaning up temporary data from old regions at 1732562835756 (+9 ms)Running coprocessor post-open hooks at 1732562835759 (+3 ms)Region opened successfully at 1732562835760 (+1 ms) 2024-11-25T19:27:15,761 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732562835718 2024-11-25T19:27:15,764 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T19:27:15,764 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T19:27:15,765 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,766 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,45469,1732562834976, state=OPEN 2024-11-25T19:27:15,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:27:15,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:27:15,768 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:27:15,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:27:15,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T19:27:15,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,45469,1732562834976 in 204 msec 2024-11-25T19:27:15,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T19:27:15,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 621 msec 2024-11-25T19:27:15,774 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:27:15,774 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T19:27:15,776 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:27:15,776 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,45469,1732562834976, seqNum=-1] 2024-11-25T19:27:15,776 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:27:15,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44011, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:27:15,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 670 msec 2024-11-25T19:27:15,784 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732562835784, completionTime=-1 2024-11-25T19:27:15,784 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T19:27:15,784 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T19:27:15,786 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T19:27:15,786 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732562895786 2024-11-25T19:27:15,786 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732562955786 2024-11-25T19:27:15,786 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T19:27:15,786 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,44919,1732562834930-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,787 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,44919,1732562834930-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,787 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,44919,1732562834930-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,787 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ef6ccb75414:44919, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:27:15,787 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,787 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:15,788 DEBUG [master/6ef6ccb75414:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T19:27:15,790 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.785sec 2024-11-25T19:27:15,790 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T19:27:15,790 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T19:27:15,790 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T19:27:15,791 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T19:27:15,791 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T19:27:15,791 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,44919,1732562834930-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:27:15,791 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,44919,1732562834930-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T19:27:15,793 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T19:27:15,793 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T19:27:15,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@311facd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:27:15,793 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,44919,1732562834930-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:27:15,793 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ef6ccb75414,44919,-1 for getting cluster id 2024-11-25T19:27:15,793 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T19:27:15,795 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '287745b7-0fda-4591-8a7b-7082d55b2d1f' 2024-11-25T19:27:15,795 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T19:27:15,795 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "287745b7-0fda-4591-8a7b-7082d55b2d1f" 2024-11-25T19:27:15,796 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d0a052, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:27:15,796 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ef6ccb75414,44919,-1] 2024-11-25T19:27:15,796 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T19:27:15,796 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:27:15,797 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56130, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T19:27:15,798 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@317cfbff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:27:15,798 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:27:15,799 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,45469,1732562834976, seqNum=-1] 2024-11-25T19:27:15,799 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:27:15,800 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56940, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:27:15,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,802 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:27:15,804 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T19:27:15,805 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T19:27:15,806 
DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 6ef6ccb75414,44919,1732562834930 2024-11-25T19:27:15,806 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@55af8ae1 2024-11-25T19:27:15,806 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T19:27:15,807 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56144, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T19:27:15,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T19:27:15,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-25T19:27:15,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:27:15,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:15,810 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T19:27:15,811 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:15,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-25T19:27:15,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:27:15,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T19:27:15,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741835_1011 (size=405) 2024-11-25T19:27:15,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45043 is added to blk_1073741835_1011 (size=405) 2024-11-25T19:27:15,824 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8e71bb565de9c8c7f7965ebf6f10e6b3, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158 2024-11-25T19:27:15,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741836_1012 (size=88) 2024-11-25T19:27:15,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741836_1012 (size=88) 2024-11-25T19:27:15,832 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:27:15,832 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8e71bb565de9c8c7f7965ebf6f10e6b3, disabling compactions & flushes 2024-11-25T19:27:15,832 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:15,832 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:15,832 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. after waiting 0 ms 2024-11-25T19:27:15,832 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:15,832 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 
2024-11-25T19:27:15,832 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: Waiting for close lock at 1732562835832Disabling compacts and flushes for region at 1732562835832Disabling writes for close at 1732562835832Writing region close event to WAL at 1732562835832Closed at 1732562835832 2024-11-25T19:27:15,834 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T19:27:15,834 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732562835834"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732562835834"}]},"ts":"1732562835834"} 2024-11-25T19:27:15,837 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-25T19:27:15,838 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T19:27:15,838 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562835838"}]},"ts":"1732562835838"} 2024-11-25T19:27:15,841 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-25T19:27:15,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e71bb565de9c8c7f7965ebf6f10e6b3, ASSIGN}] 2024-11-25T19:27:15,843 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e71bb565de9c8c7f7965ebf6f10e6b3, ASSIGN 2024-11-25T19:27:15,844 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e71bb565de9c8c7f7965ebf6f10e6b3, ASSIGN; state=OFFLINE, location=6ef6ccb75414,45469,1732562834976; forceNewPlan=false, retain=false 2024-11-25T19:27:15,995 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e71bb565de9c8c7f7965ebf6f10e6b3, regionState=OPENING, regionLocation=6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:15,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e71bb565de9c8c7f7965ebf6f10e6b3, ASSIGN 
because future has completed 2024-11-25T19:27:15,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e71bb565de9c8c7f7965ebf6f10e6b3, server=6ef6ccb75414,45469,1732562834976}] 2024-11-25T19:27:16,155 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:16,155 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8e71bb565de9c8c7f7965ebf6f10e6b3, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:27:16,155 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,155 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:27:16,156 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,156 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,157 INFO [StoreOpener-8e71bb565de9c8c7f7965ebf6f10e6b3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,159 INFO [StoreOpener-8e71bb565de9c8c7f7965ebf6f10e6b3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e71bb565de9c8c7f7965ebf6f10e6b3 columnFamilyName info 2024-11-25T19:27:16,159 DEBUG [StoreOpener-8e71bb565de9c8c7f7965ebf6f10e6b3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:27:16,159 INFO [StoreOpener-8e71bb565de9c8c7f7965ebf6f10e6b3-1 {}] regionserver.HStore(327): Store=8e71bb565de9c8c7f7965ebf6f10e6b3/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:27:16,160 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,160 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,161 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,161 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,161 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,163 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,165 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:27:16,166 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8e71bb565de9c8c7f7965ebf6f10e6b3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=704136, jitterRate=-0.10464498400688171}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T19:27:16,166 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:27:16,166 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: Running coprocessor pre-open hook at 1732562836156Writing region info on filesystem at 1732562836156Initializing all the Stores at 1732562836157 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562836157Cleaning up temporary data from old regions at 1732562836161 (+4 ms)Running coprocessor post-open hooks at 1732562836166 (+5 ms)Region opened successfully at 1732562836166 
2024-11-25T19:27:16,167 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3., pid=6, masterSystemTime=1732562836150 2024-11-25T19:27:16,169 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:16,170 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:16,171 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e71bb565de9c8c7f7965ebf6f10e6b3, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,45469,1732562834976 2024-11-25T19:27:16,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e71bb565de9c8c7f7965ebf6f10e6b3, server=6ef6ccb75414,45469,1732562834976 because future has completed 2024-11-25T19:27:16,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T19:27:16,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8e71bb565de9c8c7f7965ebf6f10e6b3, server=6ef6ccb75414,45469,1732562834976 in 176 msec 2024-11-25T19:27:16,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T19:27:16,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e71bb565de9c8c7f7965ebf6f10e6b3, ASSIGN in 336 msec 2024-11-25T19:27:16,181 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T19:27:16,181 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562836181"}]},"ts":"1732562836181"} 2024-11-25T19:27:16,183 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-25T19:27:16,184 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T19:27:16,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 376 msec 2024-11-25T19:27:16,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:16,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:17,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:17,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:18,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:18,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:19,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:19,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:20,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:20,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:20,891 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:27:20,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,894 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:20,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:27:21,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T19:27:21,232 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-25T19:27:21,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:21,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:22,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:22,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:23,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:23,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:24,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:24,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:25,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:25,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:25,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:27:25,385 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T19:27:25,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:27:25,386 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T19:27:25,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:25,386 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-25T19:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:27:25,838 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T19:27:25,838 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-25T19:27:25,845 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:25,845 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:25,849 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3., hostname=6ef6ccb75414,45469,1732562834976, seqNum=2] 2024-11-25T19:27:25,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:25,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:25,862 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T19:27:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T19:27:25,863 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T19:27:25,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T19:27:26,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45469 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-25T19:27:26,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 
2024-11-25T19:27:26,030 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T19:27:26,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/acaf0ec7cc1b4ae7b6c08d718f89cd18 is 1080, key is row0001/info:/1732562845850/Put/seqid=0 2024-11-25T19:27:26,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741837_1013 (size=6033) 2024-11-25T19:27:26,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741837_1013 (size=6033) 2024-11-25T19:27:26,054 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/acaf0ec7cc1b4ae7b6c08d718f89cd18 2024-11-25T19:27:26,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/acaf0ec7cc1b4ae7b6c08d718f89cd18 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/acaf0ec7cc1b4ae7b6c08d718f89cd18 2024-11-25T19:27:26,066 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/acaf0ec7cc1b4ae7b6c08d718f89cd18, entries=1, sequenceid=5, filesize=5.9 K 2024-11-25T19:27:26,067 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e71bb565de9c8c7f7965ebf6f10e6b3 in 38ms, sequenceid=5, compaction requested=false 2024-11-25T19:27:26,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: 2024-11-25T19:27:26,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 
2024-11-25T19:27:26,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-25T19:27:26,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-25T19:27:26,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-25T19:27:26,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 209 msec 2024-11-25T19:27:26,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 220 msec 2024-11-25T19:27:26,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-25T19:27:26,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
[The same WARN from Close-WAL-Writer-0 / util.RecoverLeaseFSUtils(258), with an identical stack trace caused by "java.io.IOException: Filesystem closed", recurs roughly once per second from 2024-11-25T19:27:27,245 through 2024-11-25T19:27:35,256, alternating between the two WAL files .../WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta and .../WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484.]
11 more 2024-11-25T19:27:35,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T19:27:35,918 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T19:27:35,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:35,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-25T19:27:35,931 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T19:27:35,933 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T19:27:35,933 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T19:27:36,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45469 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-25T19:27:36,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 
2024-11-25T19:27:36,089 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T19:27:36,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/ab0a369d7819419aae67dc764afc41d4 is 1080, key is row0002/info:/1732562855921/Put/seqid=0 2024-11-25T19:27:36,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741838_1014 (size=6033) 2024-11-25T19:27:36,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741838_1014 (size=6033) 2024-11-25T19:27:36,105 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/ab0a369d7819419aae67dc764afc41d4 2024-11-25T19:27:36,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/ab0a369d7819419aae67dc764afc41d4 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/ab0a369d7819419aae67dc764afc41d4 2024-11-25T19:27:36,117 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/ab0a369d7819419aae67dc764afc41d4, entries=1, sequenceid=9, filesize=5.9 K 2024-11-25T19:27:36,118 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e71bb565de9c8c7f7965ebf6f10e6b3 in 30ms, sequenceid=9, compaction requested=false 2024-11-25T19:27:36,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: 2024-11-25T19:27:36,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 
2024-11-25T19:27:36,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-25T19:27:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-25T19:27:36,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-25T19:27:36,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec 2024-11-25T19:27:36,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec 2024-11-25T19:27:36,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
[The WARN from Close-WAL-Writer-0 / util.RecoverLeaseFSUtils(258) resumes at 2024-11-25T19:27:36,258 and recurs roughly once per second through 2024-11-25T19:27:40,262, again alternating between the same two WAL files, each time with the identical InvocationTargetException caused by "java.io.IOException: Filesystem closed".]
11 more 2024-11-25T19:27:41,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:41,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:42,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:42,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:42,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 after 68073ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T19:27:42,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta after 68064ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T19:27:43,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:43,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:44,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:44,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:44,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T19:27:45,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:45,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-25T19:27:46,027 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T19:27:46,030 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C45469%2C1732562834976.1732562866030 2024-11-25T19:27:46,038 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:46,039 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:46,039 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:46,039 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:46,039 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:46,039 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562835368 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562866030 2024-11-25T19:27:46,041 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33047:33047),(127.0.0.1/127.0.0.1:34339:34339)] 2024-11-25T19:27:46,041 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562835368 is not closed yet, will try archiving it next time 2024-11-25T19:27:46,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:46,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741833_1009 (size=5546) 2024-11-25T19:27:46,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741833_1009 (size=5546) 2024-11-25T19:27:46,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:46,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-25T19:27:46,045 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T19:27:46,046 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T19:27:46,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T19:27:46,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45469 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-25T19:27:46,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:46,202 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T19:27:46,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/35e73d7e7d3b423286fdade803b9fd17 is 1080, key is row0003/info:/1732562866028/Put/seqid=0 2024-11-25T19:27:46,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741840_1016 (size=6033) 2024-11-25T19:27:46,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741840_1016 (size=6033) 2024-11-25T19:27:46,219 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/35e73d7e7d3b423286fdade803b9fd17 2024-11-25T19:27:46,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/35e73d7e7d3b423286fdade803b9fd17 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/35e73d7e7d3b423286fdade803b9fd17 2024-11-25T19:27:46,232 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/35e73d7e7d3b423286fdade803b9fd17, entries=1, sequenceid=13, filesize=5.9 K 2024-11-25T19:27:46,233 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e71bb565de9c8c7f7965ebf6f10e6b3 in 32ms, sequenceid=13, compaction requested=true 2024-11-25T19:27:46,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: 2024-11-25T19:27:46,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:46,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-25T19:27:46,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-25T19:27:46,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-25T19:27:46,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec 2024-11-25T19:27:46,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-11-25T19:27:46,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:46,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:47,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:47,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:48,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:48,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:49,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:49,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:50,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:50,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:51,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:51,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:52,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:52,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:53,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:53,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:54,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:54,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:55,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:55,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:56,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-25T19:27:56,138 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T19:27:56,139 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:27:56,143 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:27:56,143 DEBUG [Time-limited test {}] regionserver.HStore(1541): 8e71bb565de9c8c7f7965ebf6f10e6b3/info is initiating minor compaction (all files) 2024-11-25T19:27:56,143 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:27:56,144 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:27:56,144 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 8e71bb565de9c8c7f7965ebf6f10e6b3/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:56,144 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/acaf0ec7cc1b4ae7b6c08d718f89cd18, hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/ab0a369d7819419aae67dc764afc41d4, hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/35e73d7e7d3b423286fdade803b9fd17] into tmpdir=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp, totalSize=17.7 K 2024-11-25T19:27:56,145 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting acaf0ec7cc1b4ae7b6c08d718f89cd18, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732562845850 2024-11-25T19:27:56,146 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ab0a369d7819419aae67dc764afc41d4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732562855921 2024-11-25T19:27:56,147 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 35e73d7e7d3b423286fdade803b9fd17, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732562866028 2024-11-25T19:27:56,159 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 8e71bb565de9c8c7f7965ebf6f10e6b3#info#compaction#44 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:27:56,159 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/0e86ab64db6b4a129d239d6c4d3c0ad6 is 1080, key is row0001/info:/1732562845850/Put/seqid=0 2024-11-25T19:27:56,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741841_1017 (size=8296) 2024-11-25T19:27:56,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741841_1017 (size=8296) 2024-11-25T19:27:56,170 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/0e86ab64db6b4a129d239d6c4d3c0ad6 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/0e86ab64db6b4a129d239d6c4d3c0ad6 2024-11-25T19:27:56,178 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e71bb565de9c8c7f7965ebf6f10e6b3/info of 8e71bb565de9c8c7f7965ebf6f10e6b3 into 0e86ab64db6b4a129d239d6c4d3c0ad6(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T19:27:56,178 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: 2024-11-25T19:27:56,180 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C45469%2C1732562834976.1732562876180 2024-11-25T19:27:56,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:56,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:56,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:56,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:56,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:27:56,186 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562866030 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562876180 2024-11-25T19:27:56,187 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34339:34339),(127.0.0.1/127.0.0.1:33047:33047)] 2024-11-25T19:27:56,187 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562866030 is not closed yet, will try archiving it next time 2024-11-25T19:27:56,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741839_1015 (size=2520) 2024-11-25T19:27:56,188 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741839_1015 (size=2520) 2024-11-25T19:27:56,190 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562835368 to hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/oldWALs/6ef6ccb75414%2C45469%2C1732562834976.1732562835368 2024-11-25T19:27:56,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:56,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:27:56,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-25T19:27:56,193 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T19:27:56,194 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T19:27:56,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T19:27:56,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:56,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:27:56,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45469 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-25T19:27:56,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:56,348 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T19:27:56,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/e9ed5b382f9442e181e15a966ee031c1 is 1080, key is row0000/info:/1732562876179/Put/seqid=0 2024-11-25T19:27:56,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741843_1019 (size=6033) 2024-11-25T19:27:56,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741843_1019 (size=6033) 2024-11-25T19:27:56,363 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/e9ed5b382f9442e181e15a966ee031c1 2024-11-25T19:27:56,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/e9ed5b382f9442e181e15a966ee031c1 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/e9ed5b382f9442e181e15a966ee031c1 2024-11-25T19:27:56,375 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/e9ed5b382f9442e181e15a966ee031c1, entries=1, sequenceid=18, filesize=5.9 K 2024-11-25T19:27:56,376 INFO [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e71bb565de9c8c7f7965ebf6f10e6b3 in 28ms, sequenceid=18, compaction requested=false 2024-11-25T19:27:56,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): 
Flush status journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: 2024-11-25T19:27:56,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:27:56,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-25T19:27:56,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-25T19:27:56,382 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-25T19:27:56,382 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-25T19:27:56,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-11-25T19:27:56,649 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T19:27:56,649 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T19:27:57,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:57,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:58,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:58,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:59,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:27:59,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:00,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:00,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:01,156 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8e71bb565de9c8c7f7965ebf6f10e6b3, had cached 0 bytes from a total of 14329 2024-11-25T19:28:01,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:01,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:02,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:02,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:03,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:03,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:04,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:04,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:05,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:05,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:06,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-25T19:28:06,237 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T19:28:06,243 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C45469%2C1732562834976.1732562886243 2024-11-25T19:28:06,253 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,254 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,254 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562876180 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562886243 2024-11-25T19:28:06,256 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34339:34339),(127.0.0.1/127.0.0.1:33047:33047)] 2024-11-25T19:28:06,256 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562876180 is not closed yet, will 
try archiving it next time 2024-11-25T19:28:06,256 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/WALs/6ef6ccb75414,45469,1732562834976/6ef6ccb75414%2C45469%2C1732562834976.1732562866030 to hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/oldWALs/6ef6ccb75414%2C45469%2C1732562834976.1732562866030 2024-11-25T19:28:06,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T19:28:06,257 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T19:28:06,257 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:28:06,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:28:06,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:28:06,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741842_1018 (size=2026) 2024-11-25T19:28:06,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741842_1018 (size=2026) 2024-11-25T19:28:06,257 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T19:28:06,258 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T19:28:06,258 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=754768710, stopped=false 2024-11-25T19:28:06,258 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ef6ccb75414,44919,1732562834930 2024-11-25T19:28:06,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:28:06,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:28:06,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:06,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:06,259 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:28:06,259 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T19:28:06,259 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:28:06,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:28:06,259 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:28:06,259 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,45469,1732562834976' ***** 2024-11-25T19:28:06,259 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:28:06,260 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:28:06,261 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:28:06,261 INFO [RS:0;6ef6ccb75414:45469 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:28:06,261 INFO [RS:0;6ef6ccb75414:45469 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T19:28:06,261 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:28:06,261 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(3091): Received CLOSE for 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:28:06,263 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,45469,1732562834976 2024-11-25T19:28:06,263 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:28:06,263 INFO [RS:0;6ef6ccb75414:45469 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ef6ccb75414:45469. 2024-11-25T19:28:06,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:28:06,263 DEBUG [RS:0;6ef6ccb75414:45469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:28:06,263 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8e71bb565de9c8c7f7965ebf6f10e6b3, disabling compactions & flushes 2024-11-25T19:28:06,263 INFO [RS:0;6ef6ccb75414:45469 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T19:28:06,263 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T19:28:06,263 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:28:06,263 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T19:28:06,264 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:28:06,264 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T19:28:06,264 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. after waiting 0 ms 2024-11-25T19:28:06,264 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:28:06,264 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T19:28:06,264 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1325): Online Regions={8e71bb565de9c8c7f7965ebf6f10e6b3=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3., 1588230740=hbase:meta,,1.1588230740} 2024-11-25T19:28:06,264 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T19:28:06,264 DEBUG [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8e71bb565de9c8c7f7965ebf6f10e6b3 2024-11-25T19:28:06,264 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:28:06,264 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:28:06,264 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:28:06,264 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:28:06,264 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:28:06,264 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-25T19:28:06,268 DEBUG 
[RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/df566b4867f44d60a6a298e294d3bab7 is 1080, key is row0001/info:/1732562886239/Put/seqid=0 2024-11-25T19:28:06,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741845_1021 (size=6033) 2024-11-25T19:28:06,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741845_1021 (size=6033) 2024-11-25T19:28:06,279 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/df566b4867f44d60a6a298e294d3bab7 2024-11-25T19:28:06,280 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/info/7eb406a6bc874ff7a3c698bbec42388f is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3./info:regioninfo/1732562836170/Put/seqid=0 2024-11-25T19:28:06,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741846_1022 (size=7308) 2024-11-25T19:28:06,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741846_1022 (size=7308) 2024-11-25T19:28:06,285 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/.tmp/info/df566b4867f44d60a6a298e294d3bab7 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/df566b4867f44d60a6a298e294d3bab7 2024-11-25T19:28:06,285 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/info/7eb406a6bc874ff7a3c698bbec42388f 2024-11-25T19:28:06,290 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/df566b4867f44d60a6a298e294d3bab7, entries=1, sequenceid=22, filesize=5.9 K 2024-11-25T19:28:06,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:06,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:06,291 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e71bb565de9c8c7f7965ebf6f10e6b3 in 27ms, sequenceid=22, compaction requested=true 2024-11-25T19:28:06,292 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/acaf0ec7cc1b4ae7b6c08d718f89cd18, hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/ab0a369d7819419aae67dc764afc41d4, hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/35e73d7e7d3b423286fdade803b9fd17] to archive 2024-11-25T19:28:06,292 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T19:28:06,294 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/acaf0ec7cc1b4ae7b6c08d718f89cd18 to hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/acaf0ec7cc1b4ae7b6c08d718f89cd18 2024-11-25T19:28:06,295 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/ab0a369d7819419aae67dc764afc41d4 to hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/ab0a369d7819419aae67dc764afc41d4 2024-11-25T19:28:06,297 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/35e73d7e7d3b423286fdade803b9fd17 to hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/info/35e73d7e7d3b423286fdade803b9fd17 2024-11-25T19:28:06,297 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6ef6ccb75414:44919 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T19:28:06,297 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [acaf0ec7cc1b4ae7b6c08d718f89cd18=6033, ab0a369d7819419aae67dc764afc41d4=6033, 35e73d7e7d3b423286fdade803b9fd17=6033] 2024-11-25T19:28:06,301 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e71bb565de9c8c7f7965ebf6f10e6b3/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-25T19:28:06,302 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 2024-11-25T19:28:06,302 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8e71bb565de9c8c7f7965ebf6f10e6b3: Waiting for close lock at 1732562886263Running coprocessor pre-close hooks at 1732562886263Disabling compacts and flushes for region at 1732562886263Disabling writes for close at 1732562886264 (+1 ms)Obtaining lock to block concurrent updates at 1732562886264Preparing flush snapshotting stores in 8e71bb565de9c8c7f7965ebf6f10e6b3 at 1732562886264Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732562886264Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. at 1732562886265 (+1 ms)Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3/info: creating writer at 1732562886265Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3/info: appending metadata at 1732562886268 (+3 ms)Flushing 8e71bb565de9c8c7f7965ebf6f10e6b3/info: closing flushed file at 1732562886268Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bf1c832: reopening flushed file at 1732562886285 (+17 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e71bb565de9c8c7f7965ebf6f10e6b3 in 27ms, sequenceid=22, compaction requested=true at 1732562886291 (+6 ms)Writing region close event to WAL at 1732562886298 (+7 ms)Running coprocessor post-close hooks at 1732562886301 (+3 ms)Closed at 1732562886301 2024-11-25T19:28:06,302 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732562835807.8e71bb565de9c8c7f7965ebf6f10e6b3. 
2024-11-25T19:28:06,306 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/ns/f4057ca5257b446f83e5a2767cb77c7f is 43, key is default/ns:d/1732562835778/Put/seqid=0 2024-11-25T19:28:06,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741847_1023 (size=5153) 2024-11-25T19:28:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741847_1023 (size=5153) 2024-11-25T19:28:06,310 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/ns/f4057ca5257b446f83e5a2767cb77c7f 2024-11-25T19:28:06,327 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/table/84360c4de6fb4310a0a0ca3212d079b5 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732562836181/Put/seqid=0 2024-11-25T19:28:06,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741848_1024 (size=5508) 2024-11-25T19:28:06,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741848_1024 (size=5508) 2024-11-25T19:28:06,332 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/table/84360c4de6fb4310a0a0ca3212d079b5 2024-11-25T19:28:06,338 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/info/7eb406a6bc874ff7a3c698bbec42388f as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/info/7eb406a6bc874ff7a3c698bbec42388f 2024-11-25T19:28:06,343 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/info/7eb406a6bc874ff7a3c698bbec42388f, entries=10, sequenceid=11, filesize=7.1 K 2024-11-25T19:28:06,344 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/ns/f4057ca5257b446f83e5a2767cb77c7f as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/ns/f4057ca5257b446f83e5a2767cb77c7f 2024-11-25T19:28:06,348 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/ns/f4057ca5257b446f83e5a2767cb77c7f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T19:28:06,349 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/.tmp/table/84360c4de6fb4310a0a0ca3212d079b5 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/table/84360c4de6fb4310a0a0ca3212d079b5 2024-11-25T19:28:06,354 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/table/84360c4de6fb4310a0a0ca3212d079b5, entries=2, sequenceid=11, filesize=5.4 K 2024-11-25T19:28:06,355 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 91ms, sequenceid=11, compaction requested=false 2024-11-25T19:28:06,359 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T19:28:06,360 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:28:06,360 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:28:06,360 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562886264Running coprocessor pre-close hooks at 1732562886264Disabling compacts and flushes for region at 1732562886264Disabling writes for close at 1732562886264Obtaining lock to block concurrent updates at 1732562886264Preparing flush snapshotting stores in 1588230740 at 1732562886264Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732562886264Flushing stores of hbase:meta,,1.1588230740 at 1732562886265 (+1 ms)Flushing 1588230740/info: creating writer at 1732562886265Flushing 1588230740/info: appending metadata at 1732562886279 (+14 ms)Flushing 1588230740/info: closing flushed file at 1732562886279Flushing 1588230740/ns: creating writer at 1732562886290 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732562886305 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732562886305Flushing 1588230740/table: creating writer at 1732562886315 (+10 ms)Flushing 1588230740/table: appending metadata at 1732562886327 (+12 ms)Flushing 1588230740/table: closing flushed file at 1732562886327Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17107b09: reopening flushed file at 1732562886337 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b8f7017: reopening flushed file at 1732562886343 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c326dfd: reopening flushed file at 1732562886348 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 91ms, sequenceid=11, compaction requested=false at 1732562886355 (+7 ms)Writing region close event to WAL at 1732562886356 (+1 ms)Running coprocessor post-close hooks at 1732562886360 (+4 ms)Closed at 1732562886360 2024-11-25T19:28:06,360 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T19:28:06,464 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,45469,1732562834976; all regions closed. 2024-11-25T19:28:06,465 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,465 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,466 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,466 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,466 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741834_1010 (size=3306) 2024-11-25T19:28:06,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741834_1010 (size=3306) 2024-11-25T19:28:06,476 DEBUG [RS:0;6ef6ccb75414:45469 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/oldWALs 2024-11-25T19:28:06,477 INFO [RS:0;6ef6ccb75414:45469 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C45469%2C1732562834976.meta:.meta(num 1732562835733) 2024-11-25T19:28:06,477 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,478 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,478 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,478 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,478 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741844_1020 (size=1252) 2024-11-25T19:28:06,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741844_1020 (size=1252) 2024-11-25T19:28:06,486 DEBUG [RS:0;6ef6ccb75414:45469 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/oldWALs 2024-11-25T19:28:06,486 INFO [RS:0;6ef6ccb75414:45469 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C45469%2C1732562834976:(num 1732562886243) 2024-11-25T19:28:06,486 DEBUG [RS:0;6ef6ccb75414:45469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:28:06,486 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:28:06,487 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:28:06,487 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T19:28:06,487 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:28:06,487 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:28:06,487 INFO [RS:0;6ef6ccb75414:45469 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45469 2024-11-25T19:28:06,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:28:06,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,45469,1732562834976 2024-11-25T19:28:06,489 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:28:06,491 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,45469,1732562834976] 2024-11-25T19:28:06,492 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,45469,1732562834976 already deleted, retry=false 2024-11-25T19:28:06,492 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,45469,1732562834976 expired; onlineServers=0 2024-11-25T19:28:06,492 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ef6ccb75414,44919,1732562834930' ***** 2024-11-25T19:28:06,492 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T19:28:06,492 INFO [M:0;6ef6ccb75414:44919 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:28:06,492 INFO [M:0;6ef6ccb75414:44919 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:28:06,492 DEBUG [M:0;6ef6ccb75414:44919 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T19:28:06,492 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T19:28:06,492 DEBUG [M:0;6ef6ccb75414:44919 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T19:28:06,492 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562835117 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562835117,5,FailOnTimeoutGroup] 2024-11-25T19:28:06,492 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562835117 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562835117,5,FailOnTimeoutGroup] 2024-11-25T19:28:06,492 INFO [M:0;6ef6ccb75414:44919 {}] hbase.ChoreService(370): Chore service for: master/6ef6ccb75414:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T19:28:06,493 INFO [M:0;6ef6ccb75414:44919 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:28:06,493 DEBUG [M:0;6ef6ccb75414:44919 {}] master.HMaster(1795): Stopping service threads 2024-11-25T19:28:06,493 INFO [M:0;6ef6ccb75414:44919 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T19:28:06,493 INFO [M:0;6ef6ccb75414:44919 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:28:06,493 INFO [M:0;6ef6ccb75414:44919 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T19:28:06,493 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T19:28:06,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T19:28:06,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:06,494 DEBUG [M:0;6ef6ccb75414:44919 {}] zookeeper.ZKUtil(347): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T19:28:06,494 WARN [M:0;6ef6ccb75414:44919 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T19:28:06,494 INFO [M:0;6ef6ccb75414:44919 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/.lastflushedseqids 2024-11-25T19:28:06,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741849_1025 (size=130) 2024-11-25T19:28:06,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741849_1025 (size=130) 2024-11-25T19:28:06,499 INFO [M:0;6ef6ccb75414:44919 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T19:28:06,499 INFO [M:0;6ef6ccb75414:44919 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T19:28:06,499 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:28:06,499 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:28:06,500 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:28:06,500 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:28:06,500 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:28:06,500 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-25T19:28:06,513 DEBUG [M:0;6ef6ccb75414:44919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8a918fbb0758440c875531d6f402e4c9 is 82, key is hbase:meta,,1/info:regioninfo/1732562835765/Put/seqid=0 2024-11-25T19:28:06,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741850_1026 (size=5672) 2024-11-25T19:28:06,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741850_1026 (size=5672) 2024-11-25T19:28:06,518 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8a918fbb0758440c875531d6f402e4c9 2024-11-25T19:28:06,537 DEBUG [M:0;6ef6ccb75414:44919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f797794ce96a4299b3511118465d23c6 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732562836186/Put/seqid=0 2024-11-25T19:28:06,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741851_1027 (size=7823) 2024-11-25T19:28:06,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741851_1027 (size=7823) 2024-11-25T19:28:06,542 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f797794ce96a4299b3511118465d23c6 2024-11-25T19:28:06,546 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f797794ce96a4299b3511118465d23c6 2024-11-25T19:28:06,561 DEBUG [M:0;6ef6ccb75414:44919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ab957908d0914e9cb4d34b335a724433 is 69, key is 6ef6ccb75414,45469,1732562834976/rs:state/1732562835223/Put/seqid=0 2024-11-25T19:28:06,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741852_1028 (size=5156) 2024-11-25T19:28:06,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741852_1028 (size=5156) 2024-11-25T19:28:06,566 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ab957908d0914e9cb4d34b335a724433 2024-11-25T19:28:06,587 DEBUG [M:0;6ef6ccb75414:44919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a9b9586a7df6496d84a962420fca77de is 52, key is load_balancer_on/state:d/1732562835803/Put/seqid=0 2024-11-25T19:28:06,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:28:06,591 INFO [RS:0;6ef6ccb75414:45469 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:28:06,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45469-0x100785a32500001, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:28:06,591 INFO [RS:0;6ef6ccb75414:45469 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,45469,1732562834976; zookeeper connection closed. 
2024-11-25T19:28:06,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741853_1029 (size=5056) 2024-11-25T19:28:06,591 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@612e3d23 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@612e3d23 2024-11-25T19:28:06,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741853_1029 (size=5056) 2024-11-25T19:28:06,591 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T19:28:06,591 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a9b9586a7df6496d84a962420fca77de 2024-11-25T19:28:06,596 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8a918fbb0758440c875531d6f402e4c9 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8a918fbb0758440c875531d6f402e4c9 2024-11-25T19:28:06,601 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8a918fbb0758440c875531d6f402e4c9, entries=8, sequenceid=121, filesize=5.5 K 2024-11-25T19:28:06,603 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f797794ce96a4299b3511118465d23c6 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f797794ce96a4299b3511118465d23c6 2024-11-25T19:28:06,609 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f797794ce96a4299b3511118465d23c6 2024-11-25T19:28:06,609 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f797794ce96a4299b3511118465d23c6, entries=14, sequenceid=121, filesize=7.6 K 2024-11-25T19:28:06,610 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ab957908d0914e9cb4d34b335a724433 as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ab957908d0914e9cb4d34b335a724433 2024-11-25T19:28:06,614 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ab957908d0914e9cb4d34b335a724433, entries=1, sequenceid=121, filesize=5.0 K 2024-11-25T19:28:06,615 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a9b9586a7df6496d84a962420fca77de as hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a9b9586a7df6496d84a962420fca77de 2024-11-25T19:28:06,620 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44277/user/jenkins/test-data/313f1f50-1b27-649b-42f3-6b18cb162158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a9b9586a7df6496d84a962420fca77de, entries=1, sequenceid=121, filesize=4.9 K 2024-11-25T19:28:06,621 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=121, compaction requested=false 2024-11-25T19:28:06,623 INFO [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:28:06,623 DEBUG [M:0;6ef6ccb75414:44919 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562886499Disabling compacts and flushes for region at 1732562886499Disabling writes for close at 1732562886500 (+1 ms)Obtaining lock to block concurrent updates at 1732562886500Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732562886500Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732562886500Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732562886501 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732562886501Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732562886513 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732562886513Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732562886523 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732562886536 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732562886536Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732562886546 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732562886560 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732562886560Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732562886570 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732562886586 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732562886586Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6adfb4d: reopening flushed file at 1732562886596 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2645d423: reopening flushed file at 1732562886602 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@775d8acf: reopening flushed file at 1732562886609 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@357cd437: reopening flushed file at 1732562886614 (+5 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=121, compaction requested=false at 1732562886621 (+7 ms)Writing region close event to WAL at 1732562886622 (+1 ms)Closed at 1732562886622 2024-11-25T19:28:06,623 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,623 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,623 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,623 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,623 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:28:06,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45807 is added to blk_1073741830_1006 (size=53035) 2024-11-25T19:28:06,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45043 is added to blk_1073741830_1006 (size=53035) 2024-11-25T19:28:06,626 INFO [M:0;6ef6ccb75414:44919 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T19:28:06,626 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T19:28:06,626 INFO [M:0;6ef6ccb75414:44919 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44919 2024-11-25T19:28:06,626 INFO [M:0;6ef6ccb75414:44919 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:28:06,727 INFO [M:0;6ef6ccb75414:44919 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:28:06,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:28:06,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44919-0x100785a32500000, quorum=127.0.0.1:50449, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:28:06,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c243e85{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:28:06,735 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@231eb320{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:28:06,735 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:28:06,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6da95783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:28:06,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40c8737d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir/,STOPPED} 2024-11-25T19:28:06,738 WARN [BP-1570157930-172.17.0.2-1732562834373 heartbeating to localhost/127.0.0.1:44277 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:28:06,738 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:28:06,738 WARN [BP-1570157930-172.17.0.2-1732562834373 heartbeating to localhost/127.0.0.1:44277 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1570157930-172.17.0.2-1732562834373 (Datanode Uuid 47e8db2c-70db-4c41-a903-6d1e1ad16475) service to localhost/127.0.0.1:44277 2024-11-25T19:28:06,738 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:28:06,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data3/current/BP-1570157930-172.17.0.2-1732562834373 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:28:06,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data4/current/BP-1570157930-172.17.0.2-1732562834373 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:28:06,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:28:06,743 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a560185{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:28:06,743 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1850586f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:28:06,743 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:28:06,743 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30c71845{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:28:06,743 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38f5461{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir/,STOPPED} 2024-11-25T19:28:06,745 WARN [BP-1570157930-172.17.0.2-1732562834373 heartbeating to localhost/127.0.0.1:44277 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:28:06,745 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T19:28:06,745 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:28:06,745 WARN [BP-1570157930-172.17.0.2-1732562834373 heartbeating to localhost/127.0.0.1:44277 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1570157930-172.17.0.2-1732562834373 (Datanode Uuid 99742ee7-a7b7-4c5f-98e2-68bae2abba81) service to localhost/127.0.0.1:44277 2024-11-25T19:28:06,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data1/current/BP-1570157930-172.17.0.2-1732562834373 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:28:06,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/cluster_ac4d3f7e-84df-9155-e77b-9a9c502acb43/data/data2/current/BP-1570157930-172.17.0.2-1732562834373 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:28:06,745 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:28:06,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e0da52c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:28:06,751 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@637fc016{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:28:06,751 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:28:06,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35b0b5b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:28:06,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30eae670{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir/,STOPPED} 2024-11-25T19:28:06,756 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T19:28:06,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T19:28:06,782 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44277 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44277 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44277 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44277 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44277 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/6ef6ccb75414:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44277 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44277 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:44277 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:44277 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=67 (was 139), ProcessCount=11 (was 11), AvailableMemoryMB=4983 (was 5116) 2024-11-25T19:28:06,790 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=67, ProcessCount=11, AvailableMemoryMB=4982 2024-11-25T19:28:06,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T19:28:06,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.log.dir so I do NOT create it in target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab 2024-11-25T19:28:06,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/15a0aa61-1dc1-bb4f-8fd3-ed18a4d64288/hadoop.tmp.dir so I do NOT create it in target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab 2024-11-25T19:28:06,791 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911, deleteOnExit=true 2024-11-25T19:28:06,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T19:28:06,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/test.cache.data in system properties and HBase conf 2024-11-25T19:28:06,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T19:28:06,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir in system properties and HBase conf 2024-11-25T19:28:06,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T19:28:06,792 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T19:28:06,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T19:28:06,792 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T19:28:06,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:28:06,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 
2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/nfs.dump.dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/java.io.tmpdir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:28:06,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T19:28:06,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T19:28:06,806 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:28:06,846 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:28:06,850 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:28:06,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:28:06,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:28:06,853 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:28:06,856 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:28:06,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59703725{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:28:06,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@240fc28c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:28:06,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@188ddc10{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/java.io.tmpdir/jetty-localhost-43471-hadoop-hdfs-3_4_1-tests_jar-_-any-12999707601731109697/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:28:06,948 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fc37f93{HTTP/1.1, (http/1.1)}{localhost:43471} 2024-11-25T19:28:06,948 INFO [Time-limited test {}] server.Server(415): Started @237536ms 2024-11-25T19:28:06,959 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:28:07,003 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:28:07,006 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:28:07,006 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:28:07,006 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:28:07,007 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:28:07,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fc50460{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:28:07,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d639fc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:28:07,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1091e18a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/java.io.tmpdir/jetty-localhost-43241-hadoop-hdfs-3_4_1-tests_jar-_-any-12856467148506107824/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:28:07,100 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3221a4aa{HTTP/1.1, (http/1.1)}{localhost:43241} 2024-11-25T19:28:07,100 INFO [Time-limited test {}] server.Server(415): Started @237688ms 2024-11-25T19:28:07,101 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:28:07,127 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:28:07,130 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:28:07,130 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:28:07,131 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:28:07,131 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T19:28:07,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53298b3d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:28:07,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@314e7370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:28:07,158 WARN [Thread-1946 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data1/current/BP-1466434562-172.17.0.2-1732562886809/current, will proceed with Du for space computation calculation, 2024-11-25T19:28:07,158 WARN [Thread-1947 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data2/current/BP-1466434562-172.17.0.2-1732562886809/current, will proceed with Du for space computation calculation, 2024-11-25T19:28:07,189 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:28:07,191 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3b68e95bf6d3f6b with lease ID 0xd627a2da0f43cafc: Processing first storage report for DS-9e396b22-abf3-4e79-90e0-89f588862bff from datanode DatanodeRegistration(127.0.0.1:44639, datanodeUuid=7db64782-c815-4771-9898-bcb168e6348c, infoPort=44791, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809) 2024-11-25T19:28:07,191 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3b68e95bf6d3f6b with lease ID 0xd627a2da0f43cafc: from storage DS-9e396b22-abf3-4e79-90e0-89f588862bff node DatanodeRegistration(127.0.0.1:44639, datanodeUuid=7db64782-c815-4771-9898-bcb168e6348c, infoPort=44791, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:28:07,191 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3b68e95bf6d3f6b with lease ID 0xd627a2da0f43cafc: Processing first storage report for DS-6c7ddb4f-a0b5-44cf-9f75-11a3e8bfde63 from datanode DatanodeRegistration(127.0.0.1:44639, datanodeUuid=7db64782-c815-4771-9898-bcb168e6348c, infoPort=44791, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809) 2024-11-25T19:28:07,191 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3b68e95bf6d3f6b with lease ID 0xd627a2da0f43cafc: from storage DS-6c7ddb4f-a0b5-44cf-9f75-11a3e8bfde63 node DatanodeRegistration(127.0.0.1:44639, datanodeUuid=7db64782-c815-4771-9898-bcb168e6348c, infoPort=44791, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:28:07,239 INFO [regionserver/6ef6ccb75414:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:28:07,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27cee48d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/java.io.tmpdir/jetty-localhost-35591-hadoop-hdfs-3_4_1-tests_jar-_-any-2414614852201331275/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:28:07,244 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b29c022{HTTP/1.1, (http/1.1)}{localhost:35591} 2024-11-25T19:28:07,244 INFO [Time-limited test {}] server.Server(415): Started @237832ms 2024-11-25T19:28:07,245 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:28:07,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:07,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:07,297 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data3/current/BP-1466434562-172.17.0.2-1732562886809/current, will proceed with Du for space computation calculation, 2024-11-25T19:28:07,297 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data4/current/BP-1466434562-172.17.0.2-1732562886809/current, will proceed with Du for space computation calculation, 2024-11-25T19:28:07,314 WARN [Thread-1961 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:28:07,316 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf866b8ddf6faaf66 with lease ID 0xd627a2da0f43cafd: Processing first storage report for DS-28f74965-6ad0-4fb1-8cc1-5229fc7ba715 from datanode DatanodeRegistration(127.0.0.1:37281, datanodeUuid=3be0a97b-8343-4f9a-af4e-ed27da55a3ef, infoPort=40775, infoSecurePort=0, ipcPort=33737, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809) 2024-11-25T19:28:07,316 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf866b8ddf6faaf66 with lease ID 0xd627a2da0f43cafd: from storage DS-28f74965-6ad0-4fb1-8cc1-5229fc7ba715 node DatanodeRegistration(127.0.0.1:37281, datanodeUuid=3be0a97b-8343-4f9a-af4e-ed27da55a3ef, infoPort=40775, infoSecurePort=0, ipcPort=33737, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:28:07,316 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf866b8ddf6faaf66 with lease ID 0xd627a2da0f43cafd: Processing first storage report for DS-3dc377ea-7ad9-4d2b-9b6b-53a61ccca2a3 from datanode DatanodeRegistration(127.0.0.1:37281, datanodeUuid=3be0a97b-8343-4f9a-af4e-ed27da55a3ef, infoPort=40775, infoSecurePort=0, ipcPort=33737, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809) 2024-11-25T19:28:07,316 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf866b8ddf6faaf66 with lease ID 0xd627a2da0f43cafd: from storage DS-3dc377ea-7ad9-4d2b-9b6b-53a61ccca2a3 node DatanodeRegistration(127.0.0.1:37281, datanodeUuid=3be0a97b-8343-4f9a-af4e-ed27da55a3ef, infoPort=40775, infoSecurePort=0, ipcPort=33737, storageInfo=lv=-57;cid=testClusterID;nsid=1671949801;c=1732562886809), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 
2024-11-25T19:28:07,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab 2024-11-25T19:28:07,371 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/zookeeper_0, clientPort=59410, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T19:28:07,372 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59410 2024-11-25T19:28:07,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:07,376 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:07,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:28:07,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:28:07,390 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291 with version=8 2024-11-25T19:28:07,390 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase-staging 2024-11-25T19:28:07,392 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:28:07,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:28:07,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:28:07,393 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:28:07,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:28:07,393 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:28:07,393 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T19:28:07,393 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:28:07,393 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35203 2024-11-25T19:28:07,395 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35203 connecting to ZooKeeper ensemble=127.0.0.1:59410 2024-11-25T19:28:07,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352030x0, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:28:07,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35203-0x100785aff370000 connected 2024-11-25T19:28:07,414 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:07,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:07,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:28:07,417 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291, hbase.cluster.distributed=false 2024-11-25T19:28:07,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:28:07,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35203 2024-11-25T19:28:07,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35203 2024-11-25T19:28:07,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35203 2024-11-25T19:28:07,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35203 2024-11-25T19:28:07,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35203 2024-11-25T19:28:07,439 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:28:07,439 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:28:07,439 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:28:07,439 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:28:07,439 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:28:07,439 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:28:07,439 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:28:07,439 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:28:07,440 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41557 2024-11-25T19:28:07,441 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41557 connecting to ZooKeeper ensemble=127.0.0.1:59410 2024-11-25T19:28:07,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:07,443 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415570x0, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:28:07,446 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41557-0x100785aff370001 connected 2024-11-25T19:28:07,446 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:28:07,446 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:28:07,447 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:28:07,447 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T19:28:07,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:28:07,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41557 2024-11-25T19:28:07,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41557 2024-11-25T19:28:07,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41557 2024-11-25T19:28:07,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41557 2024-11-25T19:28:07,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41557 2024-11-25T19:28:07,461 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ef6ccb75414:35203 2024-11-25T19:28:07,461 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:07,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:28:07,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:28:07,463 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:07,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T19:28:07,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,464 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:28:07,464 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ef6ccb75414,35203,1732562887392 from backup master directory 2024-11-25T19:28:07,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:07,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:28:07,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:28:07,465 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:28:07,465 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:07,468 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/hbase.id] with ID: 1878c821-c6ca-41b5-a02f-143a721c88a7 2024-11-25T19:28:07,468 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/.tmp/hbase.id 2024-11-25T19:28:07,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:28:07,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:28:07,474 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/.tmp/hbase.id]:[hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/hbase.id] 2024-11-25T19:28:07,483 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:07,483 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T19:28:07,484 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-25T19:28:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:28:07,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:28:07,491 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:28:07,492 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T19:28:07,492 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:28:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:28:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:28:07,500 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store 2024-11-25T19:28:07,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:28:07,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:28:07,506 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:07,506 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:28:07,506 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:28:07,506 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:28:07,506 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:28:07,506 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:28:07,506 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T19:28:07,506 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562887506Disabling compacts and flushes for region at 1732562887506Disabling writes for close at 1732562887506Writing region close event to WAL at 1732562887506Closed at 1732562887506 2024-11-25T19:28:07,507 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/.initializing 2024-11-25T19:28:07,507 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/WALs/6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:07,509 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C35203%2C1732562887392, suffix=, logDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/WALs/6ef6ccb75414,35203,1732562887392, archiveDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/oldWALs, maxLogs=10 2024-11-25T19:28:07,509 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C35203%2C1732562887392.1732562887509 2024-11-25T19:28:07,513 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/WALs/6ef6ccb75414,35203,1732562887392/6ef6ccb75414%2C35203%2C1732562887392.1732562887509 2024-11-25T19:28:07,517 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40775:40775),(127.0.0.1/127.0.0.1:44791:44791)] 2024-11-25T19:28:07,518 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:28:07,518 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:07,518 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,518 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,519 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T19:28:07,520 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:07,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T19:28:07,522 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:28:07,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T19:28:07,523 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:28:07,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T19:28:07,525 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:28:07,525 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,526 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,526 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,527 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,527 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,527 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T19:28:07,528 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:28:07,530 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:28:07,530 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879899, jitterRate=0.11885049939155579}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T19:28:07,531 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732562887518Initializing all the Stores at 1732562887519 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562887519Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562887519Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562887519Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562887519Cleaning up temporary data from old regions at 1732562887527 (+8 ms)Region opened successfully at 1732562887531 (+4 ms) 2024-11-25T19:28:07,531 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T19:28:07,534 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c8d10ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:28:07,535 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T19:28:07,535 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T19:28:07,535 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T19:28:07,535 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T19:28:07,536 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T19:28:07,536 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T19:28:07,536 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T19:28:07,540 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T19:28:07,540 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T19:28:07,541 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T19:28:07,541 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T19:28:07,542 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T19:28:07,543 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T19:28:07,543 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T19:28:07,544 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T19:28:07,544 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T19:28:07,545 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T19:28:07,546 DEBUG 
[master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T19:28:07,547 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T19:28:07,548 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T19:28:07,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:28:07,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:28:07,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,549 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ef6ccb75414,35203,1732562887392, sessionid=0x100785aff370000, setting cluster-up flag (Was=false) 2024-11-25T19:28:07,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,553 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T19:28:07,554 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:07,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:07,558 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T19:28:07,559 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:07,560 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T19:28:07,562 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T19:28:07,562 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T19:28:07,562 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T19:28:07,562 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ef6ccb75414,35203,1732562887392 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ef6ccb75414:0, corePoolSize=10, maxPoolSize=10 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:28:07,563 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T19:28:07,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732562917566 2024-11-25T19:28:07,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T19:28:07,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T19:28:07,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T19:28:07,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T19:28:07,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T19:28:07,566 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T19:28:07,566 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:28:07,566 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T19:28:07,567 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,567 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T19:28:07,569 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,569 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T19:28:07,569 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T19:28:07,569 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T19:28:07,570 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T19:28:07,570 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T19:28:07,570 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562887570,5,FailOnTimeoutGroup] 2024-11-25T19:28:07,570 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562887570,5,FailOnTimeoutGroup] 2024-11-25T19:28:07,570 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,570 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T19:28:07,570 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,570 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-25T19:28:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:28:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:28:07,577 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T19:28:07,577 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291 2024-11-25T19:28:07,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:28:07,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:28:07,582 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:07,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:28:07,585 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:28:07,585 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:07,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:28:07,586 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:28:07,586 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:07,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:28:07,588 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:28:07,588 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:07,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:28:07,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:28:07,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:07,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:07,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:28:07,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740 2024-11-25T19:28:07,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740 2024-11-25T19:28:07,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:28:07,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:28:07,592 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T19:28:07,594 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:28:07,596 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:28:07,597 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827593, jitterRate=0.05233912169933319}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:28:07,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732562887583Initializing all the Stores at 1732562887583Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562887583Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562887583Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562887583Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562887583Cleaning up temporary data from old regions at 1732562887592 (+9 ms)Region opened successfully at 1732562887597 (+5 ms) 2024-11-25T19:28:07,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:28:07,598 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:28:07,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:28:07,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:28:07,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:28:07,598 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:28:07,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562887597Disabling compacts and flushes for region at 1732562887597Disabling writes for close at 1732562887598 (+1 ms)Writing region close 
event to WAL at 1732562887598Closed at 1732562887598 2024-11-25T19:28:07,599 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:28:07,599 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T19:28:07,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T19:28:07,601 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:28:07,602 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T19:28:07,652 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(746): ClusterId : 1878c821-c6ca-41b5-a02f-143a721c88a7 2024-11-25T19:28:07,652 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:28:07,656 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:28:07,656 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:28:07,659 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:28:07,660 DEBUG [RS:0;6ef6ccb75414:41557 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d4b6082, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:28:07,677 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ef6ccb75414:41557 2024-11-25T19:28:07,677 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:28:07,677 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:28:07,677 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T19:28:07,678 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,35203,1732562887392 with port=41557, startcode=1732562887438 2024-11-25T19:28:07,678 DEBUG [RS:0;6ef6ccb75414:41557 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:28:07,680 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44959, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:28:07,681 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35203 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:07,681 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35203 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:07,682 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291 2024-11-25T19:28:07,682 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44093 2024-11-25T19:28:07,682 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:28:07,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:28:07,684 DEBUG [RS:0;6ef6ccb75414:41557 {}] zookeeper.ZKUtil(111): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:07,684 WARN [RS:0;6ef6ccb75414:41557 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:28:07,684 INFO [RS:0;6ef6ccb75414:41557 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:28:07,684 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:07,684 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,41557,1732562887438] 2024-11-25T19:28:07,688 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:28:07,689 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:28:07,690 INFO [RS:0;6ef6ccb75414:41557 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:28:07,690 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T19:28:07,690 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:28:07,691 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T19:28:07,691 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,691 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,691 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,691 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,691 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:28:07,692 DEBUG [RS:0;6ef6ccb75414:41557 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:28:07,692 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T19:28:07,692 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,693 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,693 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,693 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,693 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,41557,1732562887438-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:28:07,707 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:28:07,707 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,41557,1732562887438-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,707 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,707 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.Replication(171): 6ef6ccb75414,41557,1732562887438 started 2024-11-25T19:28:07,720 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:07,720 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,41557,1732562887438, RpcServer on 6ef6ccb75414/172.17.0.2:41557, sessionid=0x100785aff370001 2024-11-25T19:28:07,720 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:28:07,720 DEBUG [RS:0;6ef6ccb75414:41557 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:07,720 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,41557,1732562887438' 2024-11-25T19:28:07,720 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:28:07,721 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:28:07,721 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:28:07,721 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:28:07,721 DEBUG [RS:0;6ef6ccb75414:41557 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:07,721 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,41557,1732562887438' 2024-11-25T19:28:07,721 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:28:07,721 DEBUG 
[RS:0;6ef6ccb75414:41557 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:28:07,722 DEBUG [RS:0;6ef6ccb75414:41557 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:28:07,722 INFO [RS:0;6ef6ccb75414:41557 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:28:07,722 INFO [RS:0;6ef6ccb75414:41557 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T19:28:07,752 WARN [6ef6ccb75414:35203 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T19:28:07,826 INFO [RS:0;6ef6ccb75414:41557 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C41557%2C1732562887438, suffix=, logDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438, archiveDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/oldWALs, maxLogs=32 2024-11-25T19:28:07,827 INFO [RS:0;6ef6ccb75414:41557 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C41557%2C1732562887438.1732562887827 2024-11-25T19:28:07,836 INFO [RS:0;6ef6ccb75414:41557 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438/6ef6ccb75414%2C41557%2C1732562887438.1732562887827 2024-11-25T19:28:07,837 DEBUG [RS:0;6ef6ccb75414:41557 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40775:40775),(127.0.0.1/127.0.0.1:44791:44791)] 2024-11-25T19:28:08,003 DEBUG [6ef6ccb75414:35203 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T19:28:08,003 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:08,005 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,41557,1732562887438, state=OPENING 2024-11-25T19:28:08,007 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T19:28:08,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:08,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:28:08,009 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:28:08,010 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:28:08,010 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:28:08,010 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,41557,1732562887438}] 2024-11-25T19:28:08,165 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T19:28:08,170 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38577, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T19:28:08,176 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T19:28:08,176 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:28:08,178 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C41557%2C1732562887438.meta, suffix=.meta, logDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438, archiveDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/oldWALs, maxLogs=32 2024-11-25T19:28:08,179 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C41557%2C1732562887438.meta.1732562888179.meta 2024-11-25T19:28:08,186 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438/6ef6ccb75414%2C41557%2C1732562887438.meta.1732562888179.meta 2024-11-25T19:28:08,189 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40775:40775),(127.0.0.1/127.0.0.1:44791:44791)] 2024-11-25T19:28:08,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:28:08,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T19:28:08,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T19:28:08,193 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
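The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above come from the FSHLogProvider sizing its files from configuration; the roll size is the WAL block size scaled by a roll multiplier. A hedged sketch of those settings, with the key names assumed to match hbase-default.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                    // FSHLogProvider, as instantiated above
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);    // rollsize = blocksize * multiplier
        conf.setInt("hbase.regionserver.maxlogs", 32);                   // matches maxLogs=32 above
        long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 0L);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        System.out.println("roll WALs at roughly " + (long) (blocksize * multiplier) + " bytes");
      }
    }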
2024-11-25T19:28:08,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T19:28:08,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:08,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T19:28:08,193 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T19:28:08,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:28:08,195 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:28:08,195 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:08,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:08,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:28:08,197 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:28:08,197 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:08,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:08,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:28:08,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:28:08,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:08,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:28:08,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:28:08,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:28:08,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:08,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T19:28:08,199 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:28:08,200 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740 2024-11-25T19:28:08,201 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740 2024-11-25T19:28:08,202 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:28:08,202 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:28:08,202 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T19:28:08,203 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:28:08,204 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792633, jitterRate=0.007885217666625977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:28:08,204 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T19:28:08,204 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732562888193Writing region info on filesystem at 1732562888193Initializing all the Stores at 1732562888194 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562888194Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562888194Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562888194Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562888194Cleaning up temporary data from old regions at 1732562888202 (+8 ms)Running coprocessor post-open hooks at 1732562888204 (+2 ms)Region opened successfully at 1732562888204 2024-11-25T19:28:08,205 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732562888164 2024-11-25T19:28:08,207 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T19:28:08,207 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T19:28:08,208 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:08,209 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,41557,1732562887438, state=OPEN 2024-11-25T19:28:08,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:28:08,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:28:08,211 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:08,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:28:08,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:28:08,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T19:28:08,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,41557,1732562887438 in 201 msec 2024-11-25T19:28:08,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T19:28:08,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-11-25T19:28:08,217 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:28:08,217 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T19:28:08,219 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:28:08,219 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,41557,1732562887438, seqNum=-1] 2024-11-25T19:28:08,219 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:28:08,221 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54429, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:28:08,227 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-11-25T19:28:08,227 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732562888227, completionTime=-1 2024-11-25T19:28:08,227 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T19:28:08,227 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732562948229 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732563008229 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35203,1732562887392-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35203,1732562887392-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35203,1732562887392-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ef6ccb75414:35203, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:28:08,229 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-25T19:28:08,230 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-25T19:28:08,231 DEBUG [master/6ef6ccb75414:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.768sec
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35203,1732562887392-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-25T19:28:08,233 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35203,1732562887392-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-25T19:28:08,235 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-25T19:28:08,235 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-25T19:28:08,235 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,35203,1732562887392-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
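Each "Chore ScheduledChore name=..., period=..., unit=... is enabled." line, on the region server earlier and on the master here, is the ChoreService registering one periodic task. A minimal sketch of that mechanism using the ScheduledChore and ChoreService classes from hbase-common (internal API, shown only to illustrate what these lines record):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreExample {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore can be cancelled cleanly.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Runs every 1000 ms, like the CompactionChecker chore above.
        ScheduledChore checker = new ScheduledChore("ExampleChecker", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
          @Override protected void chore() {
            System.out.println("periodic check ran");
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(checker);   // ChoreService logs "... is enabled." at this point
        Thread.sleep(3000);
        stopper.stop("done");
        service.shutdown();
      }
    }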
2024-11-25T19:28:08,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e87d3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:28:08,252 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ef6ccb75414,35203,-1 for getting cluster id 2024-11-25T19:28:08,252 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T19:28:08,254 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1878c821-c6ca-41b5-a02f-143a721c88a7' 2024-11-25T19:28:08,254 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T19:28:08,254 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1878c821-c6ca-41b5-a02f-143a721c88a7" 2024-11-25T19:28:08,254 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64453c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:28:08,254 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ef6ccb75414,35203,-1] 2024-11-25T19:28:08,255 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T19:28:08,255 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:28:08,256 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43010, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T19:28:08,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b4ed6ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:28:08,258 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:28:08,259 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,41557,1732562887438, seqNum=-1] 2024-11-25T19:28:08,260 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:28:08,261 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:28:08,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:08,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:28:08,268 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T19:28:08,268 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T19:28:08,269 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 6ef6ccb75414,35203,1732562887392 2024-11-25T19:28:08,269 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@54cc079f 2024-11-25T19:28:08,269 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T19:28:08,271 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43026, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T19:28:08,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35203 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T19:28:08,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35203 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-25T19:28:08,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35203 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:28:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35203 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-25T19:28:08,275 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T19:28:08,275 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:08,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35203 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-25T19:28:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35203 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T19:28:08,276 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T19:28:08,284 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741835_1011 (size=381) 2024-11-25T19:28:08,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741835_1011 (size=381) 2024-11-25T19:28:08,286 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => de88f78d5c577744ae8825b46ed080e0, NAME => 'TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291 2024-11-25T19:28:08,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741836_1012 (size=64) 2024-11-25T19:28:08,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:08,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:08,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741836_1012 (size=64) 2024-11-25T19:28:08,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:08,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing de88f78d5c577744ae8825b46ed080e0, disabling compactions & flushes 2024-11-25T19:28:08,292 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 
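The repeated "Failed invocation ... Filesystem closed" warnings are not from this cluster: the paths point at a NameNode on port 40559 rather than 44093, apparently a Close-WAL-Writer thread left over from an earlier mini cluster in the same JVM. RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively, and because that older cluster's DFSClient has already been closed, the call throws IOException("Filesystem closed"), which surfaces as the InvocationTargetException in the stack traces before the retry continues. A rough sketch of the reflective probe pattern (not the actual HBase code):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedProbe {
      // isFileClosed(Path) only exists on DistributedFileSystem, so it is looked up
      // and invoked via reflection; that is why the log shows Method.invoke frames.
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false;                       // not an HDFS filesystem
        } catch (IllegalAccessException e) {
          return false;
        } catch (InvocationTargetException e) {
          // A closed DFSClient throws IOException("Filesystem closed"); this is the
          // exception the WARN lines above are wrapping.
          return false;
        }
      }

      public static void main(String[] args) throws Exception {
        FileSystem local = FileSystem.getLocal(new Configuration());
        // Local filesystems have no isFileClosed method, so the probe simply returns false.
        System.out.println(isFileClosed(local, new Path("/tmp/example")));
      }
    }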
2024-11-25T19:28:08,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.
2024-11-25T19:28:08,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. after waiting 0 ms
2024-11-25T19:28:08,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.
2024-11-25T19:28:08,292 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.
2024-11-25T19:28:08,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for de88f78d5c577744ae8825b46ed080e0: Waiting for close lock at 1732562888292Disabling compacts and flushes for region at 1732562888292Disabling writes for close at 1732562888292Writing region close event to WAL at 1732562888292Closed at 1732562888292
2024-11-25T19:28:08,294 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-11-25T19:28:08,294 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732562888294"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732562888294"}]},"ts":"1732562888294"}
2024-11-25T19:28:08,296 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-25T19:28:08,297 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T19:28:08,297 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562888297"}]},"ts":"1732562888297"} 2024-11-25T19:28:08,299 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-25T19:28:08,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, ASSIGN}] 2024-11-25T19:28:08,301 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, ASSIGN 2024-11-25T19:28:08,302 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, ASSIGN; state=OFFLINE, location=6ef6ccb75414,41557,1732562887438; forceNewPlan=false, retain=false 2024-11-25T19:28:08,452 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=de88f78d5c577744ae8825b46ed080e0, regionState=OPENING, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:08,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, ASSIGN because future has completed 2024-11-25T19:28:08,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438}] 2024-11-25T19:28:08,620 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 
2024-11-25T19:28:08,620 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => de88f78d5c577744ae8825b46ed080e0, NAME => 'TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:28:08,620 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,621 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:08,621 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,621 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,622 INFO [StoreOpener-de88f78d5c577744ae8825b46ed080e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,624 INFO [StoreOpener-de88f78d5c577744ae8825b46ed080e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region de88f78d5c577744ae8825b46ed080e0 columnFamilyName info 2024-11-25T19:28:08,624 DEBUG [StoreOpener-de88f78d5c577744ae8825b46ed080e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:08,625 INFO [StoreOpener-de88f78d5c577744ae8825b46ed080e0-1 {}] regionserver.HStore(327): Store=de88f78d5c577744ae8825b46ed080e0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:28:08,625 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,625 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,626 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,626 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,626 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,627 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,629 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:28:08,629 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened de88f78d5c577744ae8825b46ed080e0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788302, jitterRate=0.002378493547439575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T19:28:08,629 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:08,630 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for de88f78d5c577744ae8825b46ed080e0: Running coprocessor pre-open hook at 1732562888621Writing region info on filesystem at 1732562888621Initializing all the Stores at 1732562888622 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562888622Cleaning up temporary data from old regions at 1732562888626 (+4 ms)Running coprocessor post-open hooks at 1732562888629 (+3 ms)Region opened successfully at 1732562888630 (+1 ms) 2024-11-25T19:28:08,631 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., pid=6, masterSystemTime=1732562888610 2024-11-25T19:28:08,633 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 
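The two TableDescriptorChecker warnings above fire because the test deliberately creates TestLogRolling-testLogRolling with MAX_FILESIZE=786432 bytes and MEMSTORE_FLUSHSIZE=8192 bytes, presumably so that flushes and splits happen quickly enough to exercise log rolling. The ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788302, jitterRate=0.0023...} printed when the region opens is that same MAX_FILESIZE with split jitter applied. A sketch of an equivalent client-side create, assuming the standard hbase-client builder API (the test builds its descriptor through HBaseTestingUtil, so this is only illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTinyTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                  // Deliberately tiny values, as in the request logged above; these are what
                  // TableDescriptorChecker warns about.
                  .setMaxFileSize(786432L)
                  .setMemStoreFlushSize(8192L)
                  .build());
        }
      }
    }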
2024-11-25T19:28:08,633 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 2024-11-25T19:28:08,633 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=de88f78d5c577744ae8825b46ed080e0, regionState=OPEN, openSeqNum=2, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:08,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 because future has completed 2024-11-25T19:28:08,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T19:28:08,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 in 181 msec 2024-11-25T19:28:08,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T19:28:08,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, ASSIGN in 340 msec 2024-11-25T19:28:08,642 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T19:28:08,642 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732562888642"}]},"ts":"1732562888642"} 2024-11-25T19:28:08,645 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-25T19:28:08,646 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T19:28:08,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 374 msec 2024-11-25T19:28:09,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:09,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:10,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:10,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:11,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:11,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:11,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,838 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:28:11,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:11,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:12,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:12,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:13,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:13,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:13,688 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T19:28:13,688 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-25T19:28:14,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:14,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:15,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:15,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:15,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-25T19:28:15,385 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-25T19:28:15,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T19:28:16,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:16,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:17,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:17,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:18,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:18,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-25T19:28:18,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35203 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-25T19:28:18,377 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-25T19:28:18,377 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-25T19:28:18,382 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-25T19:28:18,382 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.
2024-11-25T19:28:18,387 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2]
2024-11-25T19:28:18,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on de88f78d5c577744ae8825b46ed080e0
2024-11-25T19:28:18,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing de88f78d5c577744ae8825b46ed080e0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-25T19:28:18,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/084913e2e59242529b1f5b296273063a is 1080, key is row0001/info:/1732562898389/Put/seqid=0
2024-11-25T19:28:18,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741837_1013 (size=12509)
2024-11-25T19:28:18,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741837_1013 (size=12509)
2024-11-25T19:28:18,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/084913e2e59242529b1f5b296273063a
2024-11-25T19:28:18,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/084913e2e59242529b1f5b296273063a as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/084913e2e59242529b1f5b296273063a
2024-11-25T19:28:18,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-25T19:28:18,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/084913e2e59242529b1f5b296273063a, entries=7, sequenceid=11, filesize=12.2 K 2024-11-25T19:28:18,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for de88f78d5c577744ae8825b46ed080e0 in 46ms, sequenceid=11, compaction requested=false 2024-11-25T19:28:18,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for de88f78d5c577744ae8825b46ed080e0: 2024-11-25T19:28:18,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43596 deadline: 1732562908444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:18,467 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T19:28:18,467 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T19:28:18,467 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2 because the exception is null or not the one we care about 2024-11-25T19:28:19,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:19,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:20,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:20,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:20,889 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:28:20,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:20,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:21,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:21,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:22,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:22,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:23,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:23,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:24,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:24,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:25,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:25,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:26,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:26,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:27,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:27,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:28,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:28,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:28,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:28,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing de88f78d5c577744ae8825b46ed080e0 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-25T19:28:28,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/5c59b0a9805f422facc9ccc6f9bf913c is 1080, key is row0008/info:/1732562898403/Put/seqid=0 2024-11-25T19:28:28,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741838_1014 (size=29761) 2024-11-25T19:28:28,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741838_1014 (size=29761) 2024-11-25T19:28:28,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/5c59b0a9805f422facc9ccc6f9bf913c 2024-11-25T19:28:28,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/5c59b0a9805f422facc9ccc6f9bf913c as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c 2024-11-25T19:28:28,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c, entries=23, sequenceid=37, filesize=29.1 K 2024-11-25T19:28:28,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for de88f78d5c577744ae8825b46ed080e0 in 28ms, sequenceid=37, compaction requested=false 2024-11-25T19:28:28,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for de88f78d5c577744ae8825b46ed080e0: 2024-11-25T19:28:28,570 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-25T19:28:28,570 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:28,570 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c because midkey is the same as first or last row 2024-11-25T19:28:29,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:29,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:30,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:30,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:30,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing de88f78d5c577744ae8825b46ed080e0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:28:30,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/91bf8eeeb38c457c96d883d2c1614452 is 1080, key is row0031/info:/1732562908545/Put/seqid=0 2024-11-25T19:28:30,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741839_1015 (size=12509) 2024-11-25T19:28:30,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741839_1015 (size=12509) 2024-11-25T19:28:30,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/91bf8eeeb38c457c96d883d2c1614452 2024-11-25T19:28:30,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/91bf8eeeb38c457c96d883d2c1614452 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/91bf8eeeb38c457c96d883d2c1614452 2024-11-25T19:28:30,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/91bf8eeeb38c457c96d883d2c1614452, entries=7, sequenceid=47, filesize=12.2 K 2024-11-25T19:28:30,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for de88f78d5c577744ae8825b46ed080e0 in 27ms, sequenceid=47, compaction requested=true 2024-11-25T19:28:30,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for de88f78d5c577744ae8825b46ed080e0: 2024-11-25T19:28:30,594 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,594 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,594 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c because midkey is the same as first or last row 2024-11-25T19:28:30,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store de88f78d5c577744ae8825b46ed080e0:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-25T19:28:30,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:30,594 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:30,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing de88f78d5c577744ae8825b46ed080e0 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T19:28:30,596 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:28:30,596 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): de88f78d5c577744ae8825b46ed080e0/info is initiating minor compaction (all files) 2024-11-25T19:28:30,596 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of de88f78d5c577744ae8825b46ed080e0/info in TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 2024-11-25T19:28:30,596 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/084913e2e59242529b1f5b296273063a, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/91bf8eeeb38c457c96d883d2c1614452] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp, totalSize=53.5 K 2024-11-25T19:28:30,597 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 084913e2e59242529b1f5b296273063a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732562898389 2024-11-25T19:28:30,597 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c59b0a9805f422facc9ccc6f9bf913c, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732562898403 2024-11-25T19:28:30,597 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91bf8eeeb38c457c96d883d2c1614452, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732562908545 2024-11-25T19:28:30,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/7c778e27d98c4cc1a600221a4361798b is 1080, key is row0038/info:/1732562910569/Put/seqid=0 
2024-11-25T19:28:30,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741840_1016 (size=17894) 2024-11-25T19:28:30,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741840_1016 (size=17894) 2024-11-25T19:28:30,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/7c778e27d98c4cc1a600221a4361798b 2024-11-25T19:28:30,616 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): de88f78d5c577744ae8825b46ed080e0#info#compaction#58 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:30,616 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/093dde6dc70a4fc2ac2fe101e77ce4b0 is 1080, key is row0001/info:/1732562898389/Put/seqid=0 2024-11-25T19:28:30,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/7c778e27d98c4cc1a600221a4361798b as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/7c778e27d98c4cc1a600221a4361798b 2024-11-25T19:28:30,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741841_1017 (size=44978) 2024-11-25T19:28:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741841_1017 (size=44978) 2024-11-25T19:28:30,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/7c778e27d98c4cc1a600221a4361798b, entries=12, sequenceid=62, filesize=17.5 K 2024-11-25T19:28:30,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for de88f78d5c577744ae8825b46ed080e0 in 34ms, sequenceid=62, compaction requested=false 2024-11-25T19:28:30,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for de88f78d5c577744ae8825b46ed080e0: 2024-11-25T19:28:30,629 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.0 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,629 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,629 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c because midkey is the same as first or last row 2024-11-25T19:28:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:30,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing de88f78d5c577744ae8825b46ed080e0 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-25T19:28:30,633 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/093dde6dc70a4fc2ac2fe101e77ce4b0 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0 2024-11-25T19:28:30,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/506bdeb7bdee4b3e95f718d1b3c91f6c is 1080, key is row0050/info:/1732562910597/Put/seqid=0 2024-11-25T19:28:30,639 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in de88f78d5c577744ae8825b46ed080e0/info of de88f78d5c577744ae8825b46ed080e0 into 093dde6dc70a4fc2ac2fe101e77ce4b0(size=43.9 K), total size for store is 61.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T19:28:30,639 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for de88f78d5c577744ae8825b46ed080e0: 2024-11-25T19:28:30,640 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., storeName=de88f78d5c577744ae8825b46ed080e0/info, priority=13, startTime=1732562910594; duration=0sec 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0 because midkey is the same as first or last row 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0 because midkey is the same as first or last row 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0 because midkey is the same as first or last row 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:30,640 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: de88f78d5c577744ae8825b46ed080e0:info 2024-11-25T19:28:30,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741842_1018 (size=21141) 2024-11-25T19:28:30,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741842_1018 (size=21141) 2024-11-25T19:28:30,642 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/506bdeb7bdee4b3e95f718d1b3c91f6c 2024-11-25T19:28:30,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/506bdeb7bdee4b3e95f718d1b3c91f6c as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/506bdeb7bdee4b3e95f718d1b3c91f6c 2024-11-25T19:28:30,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/506bdeb7bdee4b3e95f718d1b3c91f6c, entries=15, sequenceid=80, filesize=20.6 K 2024-11-25T19:28:30,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for de88f78d5c577744ae8825b46ed080e0 in 22ms, sequenceid=80, compaction requested=true 2024-11-25T19:28:30,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for de88f78d5c577744ae8825b46ed080e0: 2024-11-25T19:28:30,655 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.0 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,655 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,655 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0 because midkey is the same as first or last row 2024-11-25T19:28:30,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store de88f78d5c577744ae8825b46ed080e0:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:30,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:30,655 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:30,656 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84013 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:28:30,656 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): de88f78d5c577744ae8825b46ed080e0/info is initiating minor compaction (all files) 2024-11-25T19:28:30,656 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of de88f78d5c577744ae8825b46ed080e0/info in TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 
2024-11-25T19:28:30,656 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/7c778e27d98c4cc1a600221a4361798b, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/506bdeb7bdee4b3e95f718d1b3c91f6c] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp, totalSize=82.0 K 2024-11-25T19:28:30,656 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 093dde6dc70a4fc2ac2fe101e77ce4b0, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732562898389 2024-11-25T19:28:30,657 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c778e27d98c4cc1a600221a4361798b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1732562910569 2024-11-25T19:28:30,657 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 506bdeb7bdee4b3e95f718d1b3c91f6c, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732562910597 2024-11-25T19:28:30,669 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): de88f78d5c577744ae8825b46ed080e0#info#compaction#60 average throughput is 21.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:30,670 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/c463738301804307ba2a8727374d8e09 is 1080, key is row0001/info:/1732562898389/Put/seqid=0 2024-11-25T19:28:30,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741843_1019 (size=74301) 2024-11-25T19:28:30,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741843_1019 (size=74301) 2024-11-25T19:28:30,680 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/.tmp/info/c463738301804307ba2a8727374d8e09 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09 2024-11-25T19:28:30,686 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in de88f78d5c577744ae8825b46ed080e0/info of de88f78d5c577744ae8825b46ed080e0 into c463738301804307ba2a8727374d8e09(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T19:28:30,686 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for de88f78d5c577744ae8825b46ed080e0: 2024-11-25T19:28:30,686 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., storeName=de88f78d5c577744ae8825b46ed080e0/info, priority=13, startTime=1732562910655; duration=0sec 2024-11-25T19:28:30,686 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,686 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,686 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,686 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,686 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-25T19:28:30,686 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T19:28:30,687 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:30,687 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:30,688 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: de88f78d5c577744ae8825b46ed080e0:info 2024-11-25T19:28:30,689 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35203 {}] assignment.AssignmentManager(1363): Split request from 6ef6ccb75414,41557,1732562887438, parent={ENCODED => de88f78d5c577744ae8825b46ed080e0, NAME => 'TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-25T19:28:30,694 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35203 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:30,698 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35203 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=de88f78d5c577744ae8825b46ed080e0, daughterA=fd0e73512ad398fb0fd0eb24ae1fd9ef, daughterB=0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:30,700 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=de88f78d5c577744ae8825b46ed080e0, daughterA=fd0e73512ad398fb0fd0eb24ae1fd9ef, daughterB=0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:30,700 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=de88f78d5c577744ae8825b46ed080e0, daughterA=fd0e73512ad398fb0fd0eb24ae1fd9ef, daughterB=0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:30,700 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=de88f78d5c577744ae8825b46ed080e0, daughterA=fd0e73512ad398fb0fd0eb24ae1fd9ef, daughterB=0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:30,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, UNASSIGN}] 2024-11-25T19:28:30,708 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, UNASSIGN 2024-11-25T19:28:30,709 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=de88f78d5c577744ae8825b46ed080e0, regionState=CLOSING, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:30,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, UNASSIGN because future has completed 2024-11-25T19:28:30,712 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-25T19:28:30,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438}] 2024-11-25T19:28:30,869 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:30,869 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-25T19:28:30,870 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing de88f78d5c577744ae8825b46ed080e0, disabling compactions & flushes 2024-11-25T19:28:30,870 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 2024-11-25T19:28:30,870 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 2024-11-25T19:28:30,870 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. after waiting 0 ms 2024-11-25T19:28:30,870 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 
2024-11-25T19:28:30,871 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/084913e2e59242529b1f5b296273063a, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/91bf8eeeb38c457c96d883d2c1614452, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/7c778e27d98c4cc1a600221a4361798b, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/506bdeb7bdee4b3e95f718d1b3c91f6c] to archive 2024-11-25T19:28:30,872 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T19:28:30,874 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/084913e2e59242529b1f5b296273063a to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/084913e2e59242529b1f5b296273063a 2024-11-25T19:28:30,875 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/5c59b0a9805f422facc9ccc6f9bf913c 2024-11-25T19:28:30,877 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/093dde6dc70a4fc2ac2fe101e77ce4b0 2024-11-25T19:28:30,879 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/91bf8eeeb38c457c96d883d2c1614452 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/91bf8eeeb38c457c96d883d2c1614452 2024-11-25T19:28:30,880 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/7c778e27d98c4cc1a600221a4361798b to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/7c778e27d98c4cc1a600221a4361798b 2024-11-25T19:28:30,882 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/506bdeb7bdee4b3e95f718d1b3c91f6c to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/506bdeb7bdee4b3e95f718d1b3c91f6c 2024-11-25T19:28:30,889 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-25T19:28:30,890 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 
2024-11-25T19:28:30,890 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for de88f78d5c577744ae8825b46ed080e0: Waiting for close lock at 1732562910870Running coprocessor pre-close hooks at 1732562910870Disabling compacts and flushes for region at 1732562910870Disabling writes for close at 1732562910870Writing region close event to WAL at 1732562910884 (+14 ms)Running coprocessor post-close hooks at 1732562910890 (+6 ms)Closed at 1732562910890 2024-11-25T19:28:30,892 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:30,893 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=de88f78d5c577744ae8825b46ed080e0, regionState=CLOSED 2024-11-25T19:28:30,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 because future has completed 2024-11-25T19:28:30,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-25T19:28:30,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure de88f78d5c577744ae8825b46ed080e0, server=6ef6ccb75414,41557,1732562887438 in 183 msec 2024-11-25T19:28:30,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-25T19:28:30,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=de88f78d5c577744ae8825b46ed080e0, UNASSIGN in 192 msec 2024-11-25T19:28:30,909 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:30,911 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=de88f78d5c577744ae8825b46ed080e0, threads=1 2024-11-25T19:28:30,914 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09 for region: de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:30,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741844_1020 (size=27) 2024-11-25T19:28:30,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741844_1020 (size=27) 2024-11-25T19:28:30,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741845_1021 (size=27) 2024-11-25T19:28:30,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741845_1021 (size=27) 2024-11-25T19:28:30,939 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09 for region: de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:28:30,941 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region de88f78d5c577744ae8825b46ed080e0 Daughter A: [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0] storefiles, Daughter B: [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0] storefiles. 2024-11-25T19:28:30,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741846_1022 (size=71) 2024-11-25T19:28:30,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741846_1022 (size=71) 2024-11-25T19:28:30,954 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:30,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741847_1023 (size=71) 2024-11-25T19:28:30,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741847_1023 (size=71) 2024-11-25T19:28:30,970 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:30,977 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-25T19:28:30,979 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-25T19:28:30,982 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732562910981"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732562910981"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732562910981"}]},"ts":"1732562910981"} 2024-11-25T19:28:30,982 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732562910981"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732562910981"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732562910981"}]},"ts":"1732562910981"} 
2024-11-25T19:28:30,982 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732562910981"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732562910981"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732562910981"}]},"ts":"1732562910981"} 2024-11-25T19:28:31,000 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fd0e73512ad398fb0fd0eb24ae1fd9ef, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0ef29e67aab69c72fb122bbdfdd77c60, ASSIGN}] 2024-11-25T19:28:31,001 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0ef29e67aab69c72fb122bbdfdd77c60, ASSIGN 2024-11-25T19:28:31,001 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fd0e73512ad398fb0fd0eb24ae1fd9ef, ASSIGN 2024-11-25T19:28:31,002 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0ef29e67aab69c72fb122bbdfdd77c60, ASSIGN; state=SPLITTING_NEW, location=6ef6ccb75414,41557,1732562887438; forceNewPlan=false, retain=false 2024-11-25T19:28:31,002 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fd0e73512ad398fb0fd0eb24ae1fd9ef, ASSIGN; state=SPLITTING_NEW, location=6ef6ccb75414,41557,1732562887438; forceNewPlan=false, retain=false 2024-11-25T19:28:31,153 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0ef29e67aab69c72fb122bbdfdd77c60, regionState=OPENING, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:31,153 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=fd0e73512ad398fb0fd0eb24ae1fd9ef, regionState=OPENING, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:31,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0ef29e67aab69c72fb122bbdfdd77c60, ASSIGN because future has completed 2024-11-25T19:28:31,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0ef29e67aab69c72fb122bbdfdd77c60, server=6ef6ccb75414,41557,1732562887438}] 2024-11-25T19:28:31,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fd0e73512ad398fb0fd0eb24ae1fd9ef, ASSIGN because future has completed 2024-11-25T19:28:31,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd0e73512ad398fb0fd0eb24ae1fd9ef, server=6ef6ccb75414,41557,1732562887438}] 2024-11-25T19:28:31,316 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:31,317 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 0ef29e67aab69c72fb122bbdfdd77c60, NAME => 'TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-25T19:28:31,317 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,317 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:31,317 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,317 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:31,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:31,318 INFO [StoreOpener-0ef29e67aab69c72fb122bbdfdd77c60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,319 INFO [StoreOpener-0ef29e67aab69c72fb122bbdfdd77c60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ef29e67aab69c72fb122bbdfdd77c60 columnFamilyName info 2024-11-25T19:28:31,319 DEBUG [StoreOpener-0ef29e67aab69c72fb122bbdfdd77c60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:31,328 DEBUG [StoreOpener-0ef29e67aab69c72fb122bbdfdd77c60-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0->hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09-top 2024-11-25T19:28:31,329 INFO [StoreOpener-0ef29e67aab69c72fb122bbdfdd77c60-1 {}] regionserver.HStore(327): Store=0ef29e67aab69c72fb122bbdfdd77c60/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:28:31,329 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,330 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,331 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,332 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,332 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 0ef29e67aab69c72fb122bbdfdd77c60 
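Editorial note on the repeated Close-WAL-Writer-0 warnings above: RecoverLeaseFSUtils probes whether the old WAL file is already closed by invoking DistributedFileSystem#isFileClosed reflectively (the caused-by chain in the trace shows that target), and here every probe fails because the underlying DFSClient has already been shut down ("Filesystem closed"), so the call surfaces as an InvocationTargetException and the close-writer thread simply retries; the same pair of WARNs recurs roughly once a second further down (19:28:32, :33, :34, :35, :36). The following is a rough, self-contained sketch of that probe-and-retry pattern, assuming the FileSystem really is a DistributedFileSystem; the class name, helper name and retry budget are illustrative, this is not the RecoverLeaseFSUtils source.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {

    /** Returns true once the file is known to be closed; false if we gave up. */
    static boolean waitUntilClosed(FileSystem fs, Path wal, int maxAttempts) throws InterruptedException {
        Method isFileClosed;
        try {
            // Only DistributedFileSystem declares isFileClosed(Path), hence the reflection.
            isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
            return false; // not an HDFS filesystem, nothing to probe
        }
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                if ((Boolean) isFileClosed.invoke(fs, wal)) {
                    return true;
                }
            } catch (InvocationTargetException e) {
                // e.g. "java.io.IOException: Filesystem closed" from DFSClient.checkOpen,
                // exactly what the WARNs above show; log and retry.
                System.err.println("Failed invocation for " + wal + ": " + e.getCause());
            } catch (IllegalAccessException e) {
                return false;
            }
            Thread.sleep(1000L); // the log shows the probe recurring about once a second
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path wal = new Path(args[0]); // path of the old WAL file being recovered
        System.out.println("closed=" + waitUntilClosed(fs, wal, 5));
    }
}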
2024-11-25T19:28:31,333 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,334 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 0ef29e67aab69c72fb122bbdfdd77c60; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708854, jitterRate=-0.09864608943462372}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T19:28:31,334 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:31,334 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 0ef29e67aab69c72fb122bbdfdd77c60: Running coprocessor pre-open hook at 1732562911317Writing region info on filesystem at 1732562911317Initializing all the Stores at 1732562911318 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562911318Cleaning up temporary data from old regions at 1732562911332 (+14 ms)Running coprocessor post-open hooks at 1732562911334 (+2 ms)Region opened successfully at 1732562911334 2024-11-25T19:28:31,335 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., pid=12, masterSystemTime=1732562911313 2024-11-25T19:28:31,335 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:31,335 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:31,336 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-25T19:28:31,336 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:31,336 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:28:31,336 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 
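A small aside on the two priority values in the compaction entries just above: the compact mark is queued with priority=-2147483648 (Integer.MIN_VALUE), while the store then logs "Keeping/Overriding Compaction request priority to -2147482648" for the freshly split daughter region, which is exactly Integer.MIN_VALUE + 1000. The snippet below only verifies that arithmetic from the two logged values; it makes no claim about why that particular offset is used.

public class CompactionPriorityCheck {
    public static void main(String[] args) {
        int queuedPriority = Integer.MIN_VALUE;          // -2147483648, from "Add compact mark ... priority=-2147483648"
        int daughterPriority = Integer.MIN_VALUE + 1000; // -2147482648, from "Keeping/Overriding Compaction request priority"
        System.out.println(queuedPriority);
        System.out.println(daughterPriority);
        assert daughterPriority - queuedPriority == 1000;
    }
}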
2024-11-25T19:28:31,336 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0->hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09-top] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=72.6 K 2024-11-25T19:28:31,337 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732562898389 2024-11-25T19:28:31,338 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:31,338 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:31,338 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 
2024-11-25T19:28:31,338 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => fd0e73512ad398fb0fd0eb24ae1fd9ef, NAME => 'TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-25T19:28:31,338 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,338 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:28:31,339 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,339 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,339 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0ef29e67aab69c72fb122bbdfdd77c60, regionState=OPEN, openSeqNum=86, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:31,340 INFO [StoreOpener-fd0e73512ad398fb0fd0eb24ae1fd9ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,340 INFO [StoreOpener-fd0e73512ad398fb0fd0eb24ae1fd9ef-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd0e73512ad398fb0fd0eb24ae1fd9ef columnFamilyName info 2024-11-25T19:28:31,341 DEBUG [StoreOpener-fd0e73512ad398fb0fd0eb24ae1fd9ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:28:31,341 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-25T19:28:31,341 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-25T19:28:31,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-25T19:28:31,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0ef29e67aab69c72fb122bbdfdd77c60, server=6ef6ccb75414,41557,1732562887438 because future has completed 2024-11-25T19:28:31,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-25T19:28:31,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 0ef29e67aab69c72fb122bbdfdd77c60, server=6ef6ccb75414,41557,1732562887438 in 185 msec 2024-11-25T19:28:31,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0ef29e67aab69c72fb122bbdfdd77c60, ASSIGN in 346 msec 2024-11-25T19:28:31,357 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#61 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:31,357 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/b1c5e9f45571448e833b13cbad432fc9 is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:28:31,358 DEBUG [StoreOpener-fd0e73512ad398fb0fd0eb24ae1fd9ef-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0->hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09-bottom 2024-11-25T19:28:31,358 INFO [StoreOpener-fd0e73512ad398fb0fd0eb24ae1fd9ef-1 {}] regionserver.HStore(327): Store=fd0e73512ad398fb0fd0eb24ae1fd9ef/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:28:31,359 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,359 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,361 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef 
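The hbase:meta flush that starts above ("Flushing 1588230740 4/4 column families, dataSize=5.15 KB") follows the rule the flusher states itself: "Since none of the CFs were above the size, flushing all." The sketch below, assuming that logged statement is the whole decision (compare each family's memstore size to a per-family lower bound and flush everything when none qualifies individually), just restates that rule; the class and method names are made up for illustration, and the byte counts are the approximate per-family sizes reported a little further down (info ≈ 4.95 KB, ns = 74 B, table = 122 B).

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class FlushSelectionSketch {
    /** Families whose memstores should be flushed, per the rule quoted from the log. */
    static Set<String> familiesToFlush(Map<String, Long> memstoreBytesByFamily, long perFamilyLowerBound) {
        Set<String> large = memstoreBytesByFamily.entrySet().stream()
                .filter(e -> e.getValue() > perFamilyLowerBound)
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet());
        // No family is individually above the bound -> flush all of them (the case in this log).
        return large.isEmpty() ? memstoreBytesByFamily.keySet() : large;
    }

    public static void main(String[] args) {
        Map<String, Long> sizes = Map.of("info", 5069L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
        System.out.println(familiesToFlush(sizes, 16L * 1024 * 1024)); // prints all four families
    }
}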
2024-11-25T19:28:31,361 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,361 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,362 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,363 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened fd0e73512ad398fb0fd0eb24ae1fd9ef; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771987, jitterRate=-0.018368467688560486}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T19:28:31,363 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:28:31,363 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for fd0e73512ad398fb0fd0eb24ae1fd9ef: Running coprocessor pre-open hook at 1732562911339Writing region info on filesystem at 1732562911339Initializing all the Stores at 1732562911339Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562911339Cleaning up temporary data from old regions at 1732562911361 (+22 ms)Running coprocessor post-open hooks at 1732562911363 (+2 ms)Region opened successfully at 1732562911363 2024-11-25T19:28:31,364 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef., pid=13, masterSystemTime=1732562911313 2024-11-25T19:28:31,364 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store fd0e73512ad398fb0fd0eb24ae1fd9ef:info, priority=-2147483648, current under compaction store size is 2 2024-11-25T19:28:31,364 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:31,364 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-25T19:28:31,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/info/048a14870fd6468da67b7dee1db8216e is 193, key is 
TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60./info:regioninfo/1732562911338/Put/seqid=0 2024-11-25T19:28:31,365 INFO [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:28:31,365 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.HStore(1541): fd0e73512ad398fb0fd0eb24ae1fd9ef/info is initiating minor compaction (all files) 2024-11-25T19:28:31,365 INFO [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fd0e73512ad398fb0fd0eb24ae1fd9ef/info in TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:28:31,365 INFO [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0->hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09-bottom] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/.tmp, totalSize=72.6 K 2024-11-25T19:28:31,366 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] compactions.Compactor(225): Compacting c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732562898389 2024-11-25T19:28:31,367 DEBUG [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:28:31,367 INFO [RS_OPEN_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 
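By this point both daughters of the split are open: 0ef29e67aab69c72fb122bbdfdd77c60 covers ['row0062', '') and was opened from the "...-top" half reference, while fd0e73512ad398fb0fd0eb24ae1fd9ef covers ['', 'row0062') and was opened from the "...-bottom" half reference to the same parent file c463738301804307ba2a8727374d8e09. The toy snippet below only illustrates how a single split key partitions a sorted key space into those bottom and top halves; it is not how HBase reference files are actually implemented.

import java.util.NavigableMap;
import java.util.SortedMap;
import java.util.TreeMap;

public class SplitKeyHalves {
    public static void main(String[] args) {
        NavigableMap<String, String> parentRows = new TreeMap<>();
        parentRows.put("row0001", "...");
        parentRows.put("row0061", "...");
        parentRows.put("row0062", "...");
        parentRows.put("row0100", "...");

        String splitKey = "row0062";
        // daughter A (fd0e7351...): STARTKEY='', ENDKEY='row0062'  -> the "-bottom" half
        SortedMap<String, String> bottom = parentRows.headMap(splitKey);
        // daughter B (0ef29e67...): STARTKEY='row0062', ENDKEY=''  -> the "-top" half
        SortedMap<String, String> top = parentRows.tailMap(splitKey);

        System.out.println("bottom keys: " + bottom.keySet()); // [row0001, row0061]
        System.out.println("top keys:    " + top.keySet());    // [row0062, row0100]
    }
}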
2024-11-25T19:28:31,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741848_1024 (size=8260) 2024-11-25T19:28:31,368 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=fd0e73512ad398fb0fd0eb24ae1fd9ef, regionState=OPEN, openSeqNum=86, regionLocation=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:31,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741848_1024 (size=8260) 2024-11-25T19:28:31,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd0e73512ad398fb0fd0eb24ae1fd9ef, server=6ef6ccb75414,41557,1732562887438 because future has completed 2024-11-25T19:28:31,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741849_1025 (size=9882) 2024-11-25T19:28:31,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741849_1025 (size=9882) 2024-11-25T19:28:31,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/info/048a14870fd6468da67b7dee1db8216e 2024-11-25T19:28:31,377 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-25T19:28:31,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure fd0e73512ad398fb0fd0eb24ae1fd9ef, server=6ef6ccb75414,41557,1732562887438 in 214 msec 2024-11-25T19:28:31,380 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-25T19:28:31,380 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fd0e73512ad398fb0fd0eb24ae1fd9ef, ASSIGN in 379 msec 2024-11-25T19:28:31,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=de88f78d5c577744ae8825b46ed080e0, daughterA=fd0e73512ad398fb0fd0eb24ae1fd9ef, daughterB=0ef29e67aab69c72fb122bbdfdd77c60 in 686 msec 2024-11-25T19:28:31,386 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/b1c5e9f45571448e833b13cbad432fc9 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b1c5e9f45571448e833b13cbad432fc9 2024-11-25T19:28:31,391 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into b1c5e9f45571448e833b13cbad432fc9(size=8.1 K), total size for store is 8.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T19:28:31,391 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:31,391 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=15, startTime=1732562911335; duration=0sec 2024-11-25T19:28:31,392 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:31,392 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:28:31,394 INFO [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fd0e73512ad398fb0fd0eb24ae1fd9ef#info#compaction#63 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:31,394 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/.tmp/info/d2963d0a33bf4ab89bcd83c86aec3661 is 1080, key is row0001/info:/1732562898389/Put/seqid=0 2024-11-25T19:28:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741850_1026 (size=70862) 2024-11-25T19:28:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741850_1026 (size=70862) 2024-11-25T19:28:31,404 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/.tmp/info/d2963d0a33bf4ab89bcd83c86aec3661 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/info/d2963d0a33bf4ab89bcd83c86aec3661 2024-11-25T19:28:31,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/ns/257ddd2207fe47f8930b479a6912b481 is 43, key is default/ns:d/1732562888222/Put/seqid=0 2024-11-25T19:28:31,409 INFO [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in fd0e73512ad398fb0fd0eb24ae1fd9ef/info of fd0e73512ad398fb0fd0eb24ae1fd9ef into d2963d0a33bf4ab89bcd83c86aec3661(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T19:28:31,409 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fd0e73512ad398fb0fd0eb24ae1fd9ef: 2024-11-25T19:28:31,409 INFO [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef., storeName=fd0e73512ad398fb0fd0eb24ae1fd9ef/info, priority=15, startTime=1732562911364; duration=0sec 2024-11-25T19:28:31,410 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:31,410 DEBUG [RS:0;6ef6ccb75414:41557-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fd0e73512ad398fb0fd0eb24ae1fd9ef:info 2024-11-25T19:28:31,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741851_1027 (size=5153) 2024-11-25T19:28:31,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741851_1027 (size=5153) 2024-11-25T19:28:31,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/ns/257ddd2207fe47f8930b479a6912b481 2024-11-25T19:28:31,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/table/f8106d6354764b408657d41d24c92e74 is 65, key is TestLogRolling-testLogRolling/table:state/1732562888642/Put/seqid=0 2024-11-25T19:28:31,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741852_1028 (size=5340) 2024-11-25T19:28:31,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741852_1028 (size=5340) 2024-11-25T19:28:31,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/table/f8106d6354764b408657d41d24c92e74 2024-11-25T19:28:31,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/info/048a14870fd6468da67b7dee1db8216e as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/info/048a14870fd6468da67b7dee1db8216e 2024-11-25T19:28:31,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/info/048a14870fd6468da67b7dee1db8216e, entries=30, sequenceid=17, filesize=9.7 K 2024-11-25T19:28:31,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/ns/257ddd2207fe47f8930b479a6912b481 as 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/ns/257ddd2207fe47f8930b479a6912b481 2024-11-25T19:28:31,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/ns/257ddd2207fe47f8930b479a6912b481, entries=2, sequenceid=17, filesize=5.0 K 2024-11-25T19:28:31,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/table/f8106d6354764b408657d41d24c92e74 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/table/f8106d6354764b408657d41d24c92e74 2024-11-25T19:28:31,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/table/f8106d6354764b408657d41d24c92e74, entries=2, sequenceid=17, filesize=5.2 K 2024-11-25T19:28:31,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 120ms, sequenceid=17, compaction requested=false 2024-11-25T19:28:31,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T19:28:32,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:32,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:32,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43596 deadline: 1732562922635, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. 
is not online on 6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:32,638 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. is not online on 6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T19:28:32,639 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0. is not online on 6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T19:28:32,639 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732562888271.de88f78d5c577744ae8825b46ed080e0., hostname=6ef6ccb75414,41557,1732562887438, seqNum=2 from cache 2024-11-25T19:28:33,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:33,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:34,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:34,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:35,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:35,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:35,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:35,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:36,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:36,431 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T19:28:36,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:36,467 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T19:28:37,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:37,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:37,365 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T19:28:38,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:38,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:39,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:39,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:40,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:40,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:41,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:41,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:42,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:42,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:42,668 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., hostname=6ef6ccb75414,41557,1732562887438, seqNum=86] 2024-11-25T19:28:42,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:42,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:28:42,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/e685a3f048364591828d8ae6de8a4874 is 1080, key is row0065/info:/1732562922669/Put/seqid=0 2024-11-25T19:28:42,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741853_1029 (size=12509) 2024-11-25T19:28:42,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741853_1029 (size=12509) 2024-11-25T19:28:42,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/e685a3f048364591828d8ae6de8a4874 2024-11-25T19:28:42,697 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/e685a3f048364591828d8ae6de8a4874 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e685a3f048364591828d8ae6de8a4874 2024-11-25T19:28:42,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e685a3f048364591828d8ae6de8a4874, entries=7, sequenceid=96, filesize=12.2 K 2024-11-25T19:28:42,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0ef29e67aab69c72fb122bbdfdd77c60 in 24ms, sequenceid=96, compaction requested=false 2024-11-25T19:28:42,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:42,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T19:28:42,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/9f95377d189043f48b5ba22e960707bc is 1080, key is row0072/info:/1732562922681/Put/seqid=0 2024-11-25T19:28:42,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741854_1030 (size=17894) 2024-11-25T19:28:42,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741854_1030 (size=17894) 2024-11-25T19:28:42,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/9f95377d189043f48b5ba22e960707bc 2024-11-25T19:28:42,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/9f95377d189043f48b5ba22e960707bc as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9f95377d189043f48b5ba22e960707bc 2024-11-25T19:28:42,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9f95377d189043f48b5ba22e960707bc, entries=12, sequenceid=111, filesize=17.5 K 2024-11-25T19:28:42,730 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 0ef29e67aab69c72fb122bbdfdd77c60 in 23ms, sequenceid=111, compaction requested=true 2024-11-25T19:28:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:42,730 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:42,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:42,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T19:28:42,731 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38663 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:28:42,731 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:28:42,731 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 
2024-11-25T19:28:42,731 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b1c5e9f45571448e833b13cbad432fc9, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e685a3f048364591828d8ae6de8a4874, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9f95377d189043f48b5ba22e960707bc] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=37.8 K 2024-11-25T19:28:42,732 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting b1c5e9f45571448e833b13cbad432fc9, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732562910623 2024-11-25T19:28:42,732 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting e685a3f048364591828d8ae6de8a4874, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732562922669 2024-11-25T19:28:42,732 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f95377d189043f48b5ba22e960707bc, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1732562922681 2024-11-25T19:28:42,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/2e07ebca42f44f5188828926d305fdba is 1080, key is row0084/info:/1732562922707/Put/seqid=0 2024-11-25T19:28:42,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741855_1031 (size=17894) 2024-11-25T19:28:42,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741855_1031 (size=17894) 2024-11-25T19:28:42,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/2e07ebca42f44f5188828926d305fdba 2024-11-25T19:28:42,743 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#69 average throughput is 22.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:42,743 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/f3b3d2b9917349209cd9a28d53f1bed1 is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:28:42,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/2e07ebca42f44f5188828926d305fdba as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/2e07ebca42f44f5188828926d305fdba 2024-11-25T19:28:42,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741856_1032 (size=28855) 2024-11-25T19:28:42,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741856_1032 (size=28855) 2024-11-25T19:28:42,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/2e07ebca42f44f5188828926d305fdba, entries=12, sequenceid=126, filesize=17.5 K 2024-11-25T19:28:42,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 0ef29e67aab69c72fb122bbdfdd77c60 in 21ms, sequenceid=126, compaction requested=false 2024-11-25T19:28:42,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:42,753 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/f3b3d2b9917349209cd9a28d53f1bed1 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/f3b3d2b9917349209cd9a28d53f1bed1 2024-11-25T19:28:42,759 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into f3b3d2b9917349209cd9a28d53f1bed1(size=28.2 K), total size for store is 45.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T19:28:42,759 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:42,759 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562922730; duration=0sec 2024-11-25T19:28:42,760 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:42,760 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:28:43,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:43,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:44,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:44,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:44,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:28:44,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/a97d944a49ce4a898ebf5c72fbb17a50 is 1080, key is row0096/info:/1732562922732/Put/seqid=0 2024-11-25T19:28:44,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741857_1033 (size=12516) 2024-11-25T19:28:44,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741857_1033 (size=12516) 2024-11-25T19:28:44,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/a97d944a49ce4a898ebf5c72fbb17a50 2024-11-25T19:28:44,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/a97d944a49ce4a898ebf5c72fbb17a50 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/a97d944a49ce4a898ebf5c72fbb17a50 2024-11-25T19:28:44,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/a97d944a49ce4a898ebf5c72fbb17a50, entries=7, sequenceid=137, filesize=12.2 K 2024-11-25T19:28:44,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 0ef29e67aab69c72fb122bbdfdd77c60 in 24ms, sequenceid=137, compaction requested=true 2024-11-25T19:28:44,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:44,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:44,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:44,771 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:44,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:44,772 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T19:28:44,773 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 59265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:28:44,773 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:28:44,773 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:44,773 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/f3b3d2b9917349209cd9a28d53f1bed1, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/2e07ebca42f44f5188828926d305fdba, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/a97d944a49ce4a898ebf5c72fbb17a50] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=57.9 K 2024-11-25T19:28:44,773 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting f3b3d2b9917349209cd9a28d53f1bed1, keycount=22, bloomtype=ROW, size=28.2 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1732562910623 2024-11-25T19:28:44,774 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2e07ebca42f44f5188828926d305fdba, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732562922707 2024-11-25T19:28:44,774 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting a97d944a49ce4a898ebf5c72fbb17a50, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732562922732 2024-11-25T19:28:44,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/90abd6e6269b4d448ed6c4da5400695f is 1080, key is row0103/info:/1732562924748/Put/seqid=0 2024-11-25T19:28:44,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741858_1034 (size=17906) 2024-11-25T19:28:44,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741858_1034 (size=17906) 2024-11-25T19:28:44,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=152 (bloomFilter=true), 
to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/90abd6e6269b4d448ed6c4da5400695f 2024-11-25T19:28:44,787 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#72 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:44,788 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/b249663050c140938e7c080fa62ef5de is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:28:44,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/90abd6e6269b4d448ed6c4da5400695f as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/90abd6e6269b4d448ed6c4da5400695f 2024-11-25T19:28:44,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741859_1035 (size=49463) 2024-11-25T19:28:44,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741859_1035 (size=49463) 2024-11-25T19:28:44,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/90abd6e6269b4d448ed6c4da5400695f, entries=12, sequenceid=152, filesize=17.5 K 2024-11-25T19:28:44,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for 0ef29e67aab69c72fb122bbdfdd77c60 in 29ms, sequenceid=152, compaction requested=false 2024-11-25T19:28:44,801 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/b249663050c140938e7c080fa62ef5de as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b249663050c140938e7c080fa62ef5de 2024-11-25T19:28:44,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:44,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:44,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T19:28:44,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/8add8e85a32e477b8d1b2a9ba4e92038 is 1080, key is row0115/info:/1732562924773/Put/seqid=0 2024-11-25T19:28:44,808 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into b249663050c140938e7c080fa62ef5de(size=48.3 K), total size for store is 65.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T19:28:44,808 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:44,808 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562924771; duration=0sec 2024-11-25T19:28:44,808 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:44,808 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:28:44,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741860_1036 (size=20078) 2024-11-25T19:28:44,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741860_1036 (size=20078) 2024-11-25T19:28:44,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/8add8e85a32e477b8d1b2a9ba4e92038 2024-11-25T19:28:44,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/8add8e85a32e477b8d1b2a9ba4e92038 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/8add8e85a32e477b8d1b2a9ba4e92038 2024-11-25T19:28:44,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/8add8e85a32e477b8d1b2a9ba4e92038, entries=14, sequenceid=169, filesize=19.6 K 2024-11-25T19:28:44,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 0ef29e67aab69c72fb122bbdfdd77c60 in 20ms, sequenceid=169, compaction requested=true 2024-11-25T19:28:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:44,823 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:44,823 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:44,824 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 87447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:28:44,824 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:28:44,825 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:44,825 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b249663050c140938e7c080fa62ef5de, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/90abd6e6269b4d448ed6c4da5400695f, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/8add8e85a32e477b8d1b2a9ba4e92038] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=85.4 K 2024-11-25T19:28:44,825 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting b249663050c140938e7c080fa62ef5de, keycount=41, bloomtype=ROW, size=48.3 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732562910623 2024-11-25T19:28:44,825 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 90abd6e6269b4d448ed6c4da5400695f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732562924748 2024-11-25T19:28:44,826 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8add8e85a32e477b8d1b2a9ba4e92038, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732562924773 2024-11-25T19:28:44,835 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#74 average throughput is 68.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:44,836 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/9715e75951df4733a4ca5f3a1b679106 is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:28:44,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741861_1037 (size=77734) 2024-11-25T19:28:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741861_1037 (size=77734) 2024-11-25T19:28:44,845 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/9715e75951df4733a4ca5f3a1b679106 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9715e75951df4733a4ca5f3a1b679106 2024-11-25T19:28:44,850 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into 9715e75951df4733a4ca5f3a1b679106(size=75.9 K), total size for store is 75.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T19:28:44,850 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:44,850 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562924823; duration=0sec 2024-11-25T19:28:44,850 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:44,850 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:28:45,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:45,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:46,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:46,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:46,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:46,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:28:46,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/43dd0b04407141e4bb2fce3a71a0d435 is 1080, key is row0129/info:/1732562926808/Put/seqid=0 2024-11-25T19:28:46,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741862_1038 (size=12516) 2024-11-25T19:28:46,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741862_1038 (size=12516) 2024-11-25T19:28:46,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/43dd0b04407141e4bb2fce3a71a0d435 2024-11-25T19:28:46,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/43dd0b04407141e4bb2fce3a71a0d435 as 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/43dd0b04407141e4bb2fce3a71a0d435 2024-11-25T19:28:46,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/43dd0b04407141e4bb2fce3a71a0d435, entries=7, sequenceid=181, filesize=12.2 K 2024-11-25T19:28:46,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 0ef29e67aab69c72fb122bbdfdd77c60 in 45ms, sequenceid=181, compaction requested=false 2024-11-25T19:28:46,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:46,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-25T19:28:46,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/4ec6814f630f4bd780a3eaf796bd73b3 is 1080, key is row0136/info:/1732562926822/Put/seqid=0 2024-11-25T19:28:46,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741863_1039 (size=21156) 2024-11-25T19:28:46,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741863_1039 (size=21156) 2024-11-25T19:28:46,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/4ec6814f630f4bd780a3eaf796bd73b3 2024-11-25T19:28:46,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/4ec6814f630f4bd780a3eaf796bd73b3 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/4ec6814f630f4bd780a3eaf796bd73b3 2024-11-25T19:28:46,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/4ec6814f630f4bd780a3eaf796bd73b3, entries=15, sequenceid=199, filesize=20.7 K 2024-11-25T19:28:46,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 0ef29e67aab69c72fb122bbdfdd77c60 in 31ms, sequenceid=199, compaction requested=true 2024-11-25T19:28:46,899 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:46,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:46,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:46,899 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:46,900 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:28:46,900 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:28:46,900 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:46,900 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9715e75951df4733a4ca5f3a1b679106, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/43dd0b04407141e4bb2fce3a71a0d435, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/4ec6814f630f4bd780a3eaf796bd73b3] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=108.8 K 2024-11-25T19:28:46,901 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9715e75951df4733a4ca5f3a1b679106, keycount=67, bloomtype=ROW, size=75.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732562910623 2024-11-25T19:28:46,901 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 43dd0b04407141e4bb2fce3a71a0d435, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732562926808 2024-11-25T19:28:46,901 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ec6814f630f4bd780a3eaf796bd73b3, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732562926822 2024-11-25T19:28:46,914 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#77 average throughput is 30.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:46,915 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/cf0cea703a914100852d06c932e8d09e is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:28:46,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741864_1040 (size=101556) 2024-11-25T19:28:46,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741864_1040 (size=101556) 2024-11-25T19:28:46,925 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/cf0cea703a914100852d06c932e8d09e as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/cf0cea703a914100852d06c932e8d09e 2024-11-25T19:28:46,930 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into cf0cea703a914100852d06c932e8d09e(size=99.2 K), total size for store is 99.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T19:28:46,930 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:46,931 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562926899; duration=0sec 2024-11-25T19:28:46,931 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:46,931 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:28:47,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:47,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:48,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:48,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:48,638 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T19:28:48,638 INFO [master/6ef6ccb75414:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
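Note on the repeated WARNs above: they come from a retry loop during WAL writer close. Lease recovery keeps asking HDFS whether the old WAL file is already closed, and every probe fails because the DFSClient behind that FileSystem has already been shut down ("Filesystem closed"), so the warning reappears roughly once a second. Below is a minimal sketch of that reflective isFileClosed probe, assuming a standalone tool; the class name and the main method are inventions for illustration, and only the reflective lookup mirrors what the stack traces show.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  /**
   * Returns true if HDFS reports the file as closed (lease released).
   * isFileClosed(Path) only exists on DistributedFileSystem, so it is looked
   * up reflectively; any failure, including the "Filesystem closed"
   * IOException seen in the log, is treated as "not known to be closed",
   * which is why the caller keeps retrying.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // filesystem does not expose the probe
    } catch (InvocationTargetException e) {
      // e.getCause() is the IOException("Filesystem closed") from the WARNs above
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    Path wal = new Path(args[0]); // e.g. an hdfs:// WAL path
    FileSystem fs = wal.getFileSystem(new Configuration());
    System.out.println("closed=" + isFileClosed(fs, wal));
  }
}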
2024-11-25T19:28:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:48,890 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-25T19:28:48,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/dc53759a2a404fbe9cffcf92f3370be4 is 1080, key is row0151/info:/1732562926868/Put/seqid=0 2024-11-25T19:28:48,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741865_1041 (size=16828) 2024-11-25T19:28:48,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741865_1041 (size=16828) 2024-11-25T19:28:48,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/dc53759a2a404fbe9cffcf92f3370be4 2024-11-25T19:28:48,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/dc53759a2a404fbe9cffcf92f3370be4 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/dc53759a2a404fbe9cffcf92f3370be4 2024-11-25T19:28:48,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/dc53759a2a404fbe9cffcf92f3370be4, entries=11, sequenceid=214, filesize=16.4 K 2024-11-25T19:28:48,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 0ef29e67aab69c72fb122bbdfdd77c60 in 31ms, sequenceid=214, compaction requested=false 2024-11-25T19:28:48,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:48,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:48,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-25T19:28:48,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/28cc856dcaf94303865d8c09bd4f87b6 is 1080, key is row0162/info:/1732562928892/Put/seqid=0 2024-11-25T19:28:48,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to 
blk_1073741866_1042 (size=19000) 2024-11-25T19:28:48,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741866_1042 (size=19000) 2024-11-25T19:28:48,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/28cc856dcaf94303865d8c09bd4f87b6 2024-11-25T19:28:48,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/28cc856dcaf94303865d8c09bd4f87b6 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/28cc856dcaf94303865d8c09bd4f87b6 2024-11-25T19:28:48,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/28cc856dcaf94303865d8c09bd4f87b6, entries=13, sequenceid=230, filesize=18.6 K 2024-11-25T19:28:48,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for 0ef29e67aab69c72fb122bbdfdd77c60 in 20ms, sequenceid=230, compaction requested=true 2024-11-25T19:28:48,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:48,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:48,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:48,942 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:48,943 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137384 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:28:48,943 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:28:48,943 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 
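Note on the compaction-selection lines above: SortedCompactionPolicy gathers the eligible store files and the exploring policy picks a window of them whose sizes are "in ratio", i.e. no single file dwarfs the rest of the window. A much-simplified sketch of that ratio test follows, using the byte sizes from the 57.9 K selection logged earlier in this run and a ratio of 1.2 (the usual default). The class and method names are hypothetical, and the real policy adds further conditions (minimum and maximum file counts, and a minimum compaction size below which the ratio test is skipped), so treat this as the core idea only.

import java.util.List;

public class RatioSelectionSketch {
  // A window of files is "in ratio" when no single file is larger than
  // `ratio` times the combined size of the other files in the window.
  static boolean inRatio(List<Long> sizes, double ratio) {
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      if (s > (total - s) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the three files in the 57.9 K selection above.
    List<Long> window = List.of(28_843L, 17_906L, 12_516L);
    System.out.println("in ratio at 1.2: " + inRatio(window, 1.2)); // prints true
  }
}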
2024-11-25T19:28:48,943 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/cf0cea703a914100852d06c932e8d09e, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/dc53759a2a404fbe9cffcf92f3370be4, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/28cc856dcaf94303865d8c09bd4f87b6] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=134.2 K 2024-11-25T19:28:48,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:48,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-25T19:28:48,944 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting cf0cea703a914100852d06c932e8d09e, keycount=89, bloomtype=ROW, size=99.2 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732562910623 2024-11-25T19:28:48,944 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting dc53759a2a404fbe9cffcf92f3370be4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732562926868 2024-11-25T19:28:48,945 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 28cc856dcaf94303865d8c09bd4f87b6, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732562928892 2024-11-25T19:28:48,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/184aa9d5e10b441f94731a3fcfe3117b is 1080, key is row0175/info:/1732562928922/Put/seqid=0 2024-11-25T19:28:48,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741867_1043 (size=19000) 2024-11-25T19:28:48,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741867_1043 (size=19000) 2024-11-25T19:28:48,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/184aa9d5e10b441f94731a3fcfe3117b 2024-11-25T19:28:48,957 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#81 average throughput is 38.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:48,957 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/7d52875c0151480e8f8ed946491f7598 is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:28:48,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/184aa9d5e10b441f94731a3fcfe3117b as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/184aa9d5e10b441f94731a3fcfe3117b 2024-11-25T19:28:48,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741868_1044 (size=127666) 2024-11-25T19:28:48,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741868_1044 (size=127666) 2024-11-25T19:28:48,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/184aa9d5e10b441f94731a3fcfe3117b, entries=13, sequenceid=246, filesize=18.6 K 2024-11-25T19:28:48,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=5.25 KB/5380 for 0ef29e67aab69c72fb122bbdfdd77c60 in 21ms, sequenceid=246, compaction requested=false 2024-11-25T19:28:48,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:48,967 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/7d52875c0151480e8f8ed946491f7598 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/7d52875c0151480e8f8ed946491f7598 2024-11-25T19:28:48,972 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into 7d52875c0151480e8f8ed946491f7598(size=124.7 K), total size for store is 143.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
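[Editor's note] The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries reflect size-ratio based selection: a contiguous window of store files is only compacted together when no single file dwarfs the rest of the window. A greatly simplified sketch of that ratio rule in plain Java; this illustrates the idea only and is not HBase's ExploringCompactionPolicy, and the ratio value and file sizes below are assumptions, not taken from this run:

import java.util.ArrayList;
import java.util.List;

public class RatioCompactionSketch {
    // Returns the chosen contiguous window of store-file sizes, or an empty
    // list if no window of at least minFiles satisfies the ratio rule.
    static List<Long> select(long[] sizes, int minFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < sizes.length; start++) {
            for (int end = start + minFiles - 1; end < sizes.length; end++) {
                long total = 0;
                for (int i = start; i <= end; i++) total += sizes[i];
                boolean valid = true;
                for (int i = start; i <= end; i++) {
                    // ratio rule: no single file may be much larger than the rest of the window
                    if (sizes[i] > ratio * (total - sizes[i])) { valid = false; break; }
                }
                int width = end - start + 1;
                if (valid && (width > best.size() || (width == best.size() && total < bestTotal))) {
                    best = new ArrayList<>();
                    for (int i = start; i <= end; i++) best.add(sizes[i]);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // made-up sizes in bytes; with these, all three files fit the 1.2 ratio window
        long[] sizes = {40_000, 35_000, 30_000};
        System.out.println("Selected for compaction: " + select(sizes, 3, 1.2));
    }
}

The real policy also weighs permutations against each other (hence "after considering 1 permutations") and has separate handling when the store is at risk of blocking, so this window scan is only the core ratio check.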
2024-11-25T19:28:48,972 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:48,972 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562928942; duration=0sec 2024-11-25T19:28:48,972 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:48,972 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:28:49,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:49,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:50,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:50,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:28:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:50,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:28:50,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/6fe8c008d5124119994dc02aad557742 is 1080, key is row0188/info:/1732562928945/Put/seqid=0 2024-11-25T19:28:50,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741869_1045 (size=12517) 2024-11-25T19:28:50,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741869_1045 (size=12517) 2024-11-25T19:28:50,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/6fe8c008d5124119994dc02aad557742 2024-11-25T19:28:50,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/6fe8c008d5124119994dc02aad557742 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/6fe8c008d5124119994dc02aad557742 2024-11-25T19:28:51,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/6fe8c008d5124119994dc02aad557742, entries=7, sequenceid=257, filesize=12.2 K 2024-11-25T19:28:51,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 0ef29e67aab69c72fb122bbdfdd77c60 in 43ms, sequenceid=257, compaction requested=true 2024-11-25T19:28:51,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:51,002 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:28:51,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:28:51,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:51,003 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 159183 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-25T19:28:51,003 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:28:51,003 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:28:51,004 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/7d52875c0151480e8f8ed946491f7598, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/184aa9d5e10b441f94731a3fcfe3117b, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/6fe8c008d5124119994dc02aad557742] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=155.5 K 2024-11-25T19:28:51,004 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d52875c0151480e8f8ed946491f7598, keycount=113, bloomtype=ROW, size=124.7 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732562910623 2024-11-25T19:28:51,005 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 184aa9d5e10b441f94731a3fcfe3117b, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732562928922 2024-11-25T19:28:51,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:28:51,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-25T19:28:51,005 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6fe8c008d5124119994dc02aad557742, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732562928945 2024-11-25T19:28:51,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/ec4cbe70a6ee41b49c310d7b61eb6e02 is 1080, key is row0195/info:/1732562930960/Put/seqid=0 2024-11-25T19:28:51,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41557 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0ef29e67aab69c72fb122bbdfdd77c60, server=6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-25T19:28:51,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41557 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43596 deadline: 1732562941026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0ef29e67aab69c72fb122bbdfdd77c60, server=6ef6ccb75414,41557,1732562887438 2024-11-25T19:28:51,027 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., hostname=6ef6ccb75414,41557,1732562887438, seqNum=86 , the old value is region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., hostname=6ef6ccb75414,41557,1732562887438, seqNum=86, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0ef29e67aab69c72fb122bbdfdd77c60, server=6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T19:28:51,027 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., hostname=6ef6ccb75414,41557,1732562887438, seqNum=86 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0ef29e67aab69c72fb122bbdfdd77c60, server=6ef6ccb75414,41557,1732562887438 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T19:28:51,027 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., hostname=6ef6ccb75414,41557,1732562887438, seqNum=86 because the exception is null or not the one we care about 2024-11-25T19:28:51,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741870_1046 (size=26570) 2024-11-25T19:28:51,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741870_1046 (size=26570) 2024-11-25T19:28:51,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/ec4cbe70a6ee41b49c310d7b61eb6e02 2024-11-25T19:28:51,034 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#84 average throughput is 22.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:28:51,035 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/e49a66d9398d4ef7b1db021101d69460 is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:28:51,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741871_1047 (size=149402) 2024-11-25T19:28:51,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741871_1047 (size=149402) 2024-11-25T19:28:51,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/ec4cbe70a6ee41b49c310d7b61eb6e02 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/ec4cbe70a6ee41b49c310d7b61eb6e02 2024-11-25T19:28:51,052 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/e49a66d9398d4ef7b1db021101d69460 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e49a66d9398d4ef7b1db021101d69460 2024-11-25T19:28:51,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/ec4cbe70a6ee41b49c310d7b61eb6e02, entries=20, sequenceid=280, filesize=25.9 K 2024-11-25T19:28:51,063 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into e49a66d9398d4ef7b1db021101d69460(size=145.9 K), total size for store is 171.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
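[Editor's note] The RegionTooBusyException entries a little earlier ("Over memstore limit=32.0 K") are write backpressure: once a region's in-memory size climbs past a blocking multiple of its flush size, new mutations are rejected until the flusher catches up, which is why the client retries against the same region location. A small illustrative sketch of that kind of check in plain Java; the class, field, and threshold names are hypothetical and do not mirror HRegion.checkResources:

import java.util.concurrent.atomic.AtomicLong;

public class MemstoreBackpressureSketch {
    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final long flushSizeBytes;          // when to ask for a flush
    private final long blockingLimitBytes;      // when to reject writes outright
    private final AtomicLong memstoreSize = new AtomicLong();
    private volatile boolean flushRequested;

    MemstoreBackpressureSketch(long flushSizeBytes, int blockingMultiplier) {
        this.flushSizeBytes = flushSizeBytes;
        this.blockingLimitBytes = flushSizeBytes * blockingMultiplier;
    }

    void put(byte[] value) {
        long newSize = memstoreSize.addAndGet(value.length);
        if (newSize > blockingLimitBytes) {
            memstoreSize.addAndGet(-value.length);            // roll back the accounting
            throw new RegionTooBusy("Over memstore limit=" + blockingLimitBytes + " bytes");
        }
        if (newSize > flushSizeBytes && !flushRequested) {
            flushRequested = true;                            // a background flusher would pick this up
            System.out.println("Flush requested at " + newSize + " bytes");
        }
    }

    // Called by the (hypothetical) flusher once the snapshot is persisted.
    void flushCompleted(long flushedBytes) {
        memstoreSize.addAndGet(-flushedBytes);
        flushRequested = false;
    }

    public static void main(String[] args) {
        MemstoreBackpressureSketch region = new MemstoreBackpressureSketch(8 * 1024, 4);
        byte[] row = new byte[1024];
        try {
            for (int i = 0; i < 40; i++) region.put(row);     // no flush completes, so the limit is hit
        } catch (RegionTooBusy e) {
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}

In the log the test drives writes faster than the single MemStoreFlusher thread can drain them, so this limit is expected to trip occasionally and the client's retry (visible in the AsyncRegionLocatorHelper lines) is the normal recovery path.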
2024-11-25T19:28:51,063 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:51,063 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562931002; duration=0sec 2024-11-25T19:28:51,063 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:28:51,063 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:28:51,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 0ef29e67aab69c72fb122bbdfdd77c60 in 52ms, sequenceid=280, compaction requested=false 2024-11-25T19:28:51,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:28:51,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:51,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:52,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:52,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:53,193 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-25T19:28:53,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:53,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:54,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:54,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:55,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:55,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:56,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:56,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:57,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:57,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:58,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:58,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:59,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:28:59,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:00,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:00,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:01,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:29:01,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-25T19:29:01,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/0ba4b9e5c0514841b4d920d237ae62a1 is 1080, key is row0215/info:/1732562931006/Put/seqid=0 2024-11-25T19:29:01,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741872_1048 (size=15760) 2024-11-25T19:29:01,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741872_1048 (size=15760) 2024-11-25T19:29:01,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/0ba4b9e5c0514841b4d920d237ae62a1 2024-11-25T19:29:01,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/0ba4b9e5c0514841b4d920d237ae62a1 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/0ba4b9e5c0514841b4d920d237ae62a1 2024-11-25T19:29:01,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/0ba4b9e5c0514841b4d920d237ae62a1, entries=10, sequenceid=294, filesize=15.4 K 2024-11-25T19:29:01,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 0ef29e67aab69c72fb122bbdfdd77c60 in 24ms, sequenceid=294, compaction requested=true 2024-11-25T19:29:01,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:29:01,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:29:01,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:29:01,133 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:29:01,134 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 191732 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-25T19:29:01,134 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:29:01,134 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:29:01,134 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e49a66d9398d4ef7b1db021101d69460, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/ec4cbe70a6ee41b49c310d7b61eb6e02, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/0ba4b9e5c0514841b4d920d237ae62a1] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=187.2 K 2024-11-25T19:29:01,134 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting e49a66d9398d4ef7b1db021101d69460, keycount=133, bloomtype=ROW, size=145.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732562910623 2024-11-25T19:29:01,135 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec4cbe70a6ee41b49c310d7b61eb6e02, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732562930960 2024-11-25T19:29:01,135 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0ba4b9e5c0514841b4d920d237ae62a1, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732562931006 2024-11-25T19:29:01,146 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#86 average throughput is 83.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:29:01,147 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/91a6fdb79861490b97e5412228a6004a is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:29:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741873_1049 (size=181882) 2024-11-25T19:29:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741873_1049 (size=181882) 2024-11-25T19:29:01,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:01,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:01,562 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/91a6fdb79861490b97e5412228a6004a as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/91a6fdb79861490b97e5412228a6004a 2024-11-25T19:29:01,574 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into 91a6fdb79861490b97e5412228a6004a(size=177.6 K), total size for store is 177.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
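The repeated "Failed invocation ... Filesystem closed" warnings in this stretch come from the Close-WAL-Writer thread polling whether the old WAL file has been closed: RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed reflectively, and because the DFSClient behind that filesystem has already been shut down, each probe fails with an InvocationTargetException whose cause is IOException: Filesystem closed, and the probe is retried roughly once per second. The snippet below is a minimal, hypothetical sketch of such a reflective probe (it is not the HBase RecoverLeaseFSUtils code); it shows why the real error surfaces as the "Caused by" of an InvocationTargetException in the traces above.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: call isFileClosed(Path) via reflection, as a compatibility shim might,
// and unwrap the real failure when the underlying client has been closed.
final class IsFileClosedProbe {
  static Boolean probe(FileSystem fs, Path walPath) throws IOException {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, walPath);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return null; // isFileClosed not available on this FileSystem implementation
    } catch (InvocationTargetException e) {
      // With a closed DFSClient the cause is IOException("Filesystem closed"),
      // matching the "Caused by" lines in the traces above.
      Throwable cause = e.getCause();
      throw cause instanceof IOException ? (IOException) cause : new IOException(cause);
    }
  }
}
```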
2024-11-25T19:29:01,574 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:29:01,574 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562941133; duration=0sec 2024-11-25T19:29:01,575 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:29:01,575 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:29:02,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:02,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:29:03,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T19:29:03,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/d7142362f65f4b8ab5b2599a4bae46f2 is 1080, key is row0225/info:/1732562943110/Put/seqid=0 2024-11-25T19:29:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741874_1050 (size=12523) 2024-11-25T19:29:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741874_1050 (size=12523) 2024-11-25T19:29:03,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/d7142362f65f4b8ab5b2599a4bae46f2 2024-11-25T19:29:03,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/d7142362f65f4b8ab5b2599a4bae46f2 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/d7142362f65f4b8ab5b2599a4bae46f2 2024-11-25T19:29:03,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/d7142362f65f4b8ab5b2599a4bae46f2, entries=7, sequenceid=305, filesize=12.2 K 2024-11-25T19:29:03,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 0ef29e67aab69c72fb122bbdfdd77c60 in 26ms, sequenceid=305, compaction requested=false 2024-11-25T19:29:03,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:29:03,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:29:03,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T19:29:03,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/75f51bb82ff84aed91c0291547d0692f is 1080, key is row0232/info:/1732562943124/Put/seqid=0 2024-11-25T19:29:03,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to 
blk_1073741875_1051 (size=17918) 2024-11-25T19:29:03,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741875_1051 (size=17918) 2024-11-25T19:29:03,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/75f51bb82ff84aed91c0291547d0692f 2024-11-25T19:29:03,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/75f51bb82ff84aed91c0291547d0692f as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/75f51bb82ff84aed91c0291547d0692f 2024-11-25T19:29:03,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/75f51bb82ff84aed91c0291547d0692f, entries=12, sequenceid=320, filesize=17.5 K 2024-11-25T19:29:03,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for 0ef29e67aab69c72fb122bbdfdd77c60 in 26ms, sequenceid=320, compaction requested=true 2024-11-25T19:29:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:29:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ef29e67aab69c72fb122bbdfdd77c60:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T19:29:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:29:03,175 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T19:29:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41557 {}] regionserver.HRegion(8855): Flush requested on 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:29:03,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0ef29e67aab69c72fb122bbdfdd77c60 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-25T19:29:03,176 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 212323 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T19:29:03,176 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1541): 0ef29e67aab69c72fb122bbdfdd77c60/info is initiating minor compaction (all files) 2024-11-25T19:29:03,176 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0ef29e67aab69c72fb122bbdfdd77c60/info in TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 
2024-11-25T19:29:03,177 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/91a6fdb79861490b97e5412228a6004a, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/d7142362f65f4b8ab5b2599a4bae46f2, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/75f51bb82ff84aed91c0291547d0692f] into tmpdir=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp, totalSize=207.3 K 2024-11-25T19:29:03,177 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91a6fdb79861490b97e5412228a6004a, keycount=163, bloomtype=ROW, size=177.6 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732562910623 2024-11-25T19:29:03,177 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7142362f65f4b8ab5b2599a4bae46f2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732562943110 2024-11-25T19:29:03,178 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] compactions.Compactor(225): Compacting 75f51bb82ff84aed91c0291547d0692f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732562943124 2024-11-25T19:29:03,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/1c3c5008924d48879974518e96cbbd2c is 1080, key is row0244/info:/1732562943150/Put/seqid=0 2024-11-25T19:29:03,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741876_1052 (size=19013) 2024-11-25T19:29:03,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741876_1052 (size=19013) 2024-11-25T19:29:03,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/1c3c5008924d48879974518e96cbbd2c 2024-11-25T19:29:03,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/1c3c5008924d48879974518e96cbbd2c as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/1c3c5008924d48879974518e96cbbd2c 2024-11-25T19:29:03,190 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ef29e67aab69c72fb122bbdfdd77c60#info#compaction#90 average throughput is 46.69 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T19:29:03,191 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/f0bcb94b40b8429f8b813c86129ad1de is 1080, key is row0062/info:/1732562910623/Put/seqid=0 2024-11-25T19:29:03,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741877_1053 (size=202489) 2024-11-25T19:29:03,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741877_1053 (size=202489) 2024-11-25T19:29:03,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/1c3c5008924d48879974518e96cbbd2c, entries=13, sequenceid=336, filesize=18.6 K 2024-11-25T19:29:03,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 0ef29e67aab69c72fb122bbdfdd77c60 in 21ms, sequenceid=336, compaction requested=false 2024-11-25T19:29:03,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:29:03,199 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/.tmp/info/f0bcb94b40b8429f8b813c86129ad1de as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/f0bcb94b40b8429f8b813c86129ad1de 2024-11-25T19:29:03,204 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0ef29e67aab69c72fb122bbdfdd77c60/info of 0ef29e67aab69c72fb122bbdfdd77c60 into f0bcb94b40b8429f8b813c86129ad1de(size=197.7 K), total size for store is 216.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
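Each flush and compaction above first writes its new HFile under the region's .tmp directory and only then "commits" it into the column family directory (the HRegionFileSystem(442): Committing ... lines), so readers never observe a partially written store file. Below is a minimal sketch of that write-then-rename pattern against the plain Hadoop FileSystem API, with made-up paths; it illustrates the idea rather than HBase's actual HRegionFileSystem logic.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    // Hypothetical paths standing in for <region>/.tmp/info/<file> and <region>/info/<file>.
    Path tmpFile = new Path("/example/region/.tmp/info/newfile");
    Path storeFile = new Path("/example/region/info/newfile");

    // 1) Write the complete file under .tmp first.
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.writeBytes("hfile bytes would go here");
    }

    // 2) Commit by renaming into the store directory in one step.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new java.io.IOException("commit failed for " + storeFile);
    }
  }
}
```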
2024-11-25T19:29:03,204 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0ef29e67aab69c72fb122bbdfdd77c60: 2024-11-25T19:29:03,204 INFO [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., storeName=0ef29e67aab69c72fb122bbdfdd77c60/info, priority=13, startTime=1732562943175; duration=0sec 2024-11-25T19:29:03,204 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T19:29:03,204 DEBUG [RS:0;6ef6ccb75414:41557-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ef29e67aab69c72fb122bbdfdd77c60:info 2024-11-25T19:29:03,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:03,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:04,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:04,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:05,176 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-25T19:29:05,177 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C41557%2C1732562887438.1732562945176 2024-11-25T19:29:05,208 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,208 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,209 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,209 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,209 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,209 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438/6ef6ccb75414%2C41557%2C1732562887438.1732562887827 with entries=316, filesize=309.49 KB; new WAL /user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438/6ef6ccb75414%2C41557%2C1732562887438.1732562945176 2024-11-25T19:29:05,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741833_1009 (size=316924) 2024-11-25T19:29:05,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741833_1009 (size=316924) 2024-11-25T19:29:05,217 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/WALs/6ef6ccb75414,41557,1732562887438/6ef6ccb75414%2C41557%2C1732562887438.1732562887827 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/oldWALs/6ef6ccb75414%2C41557%2C1732562887438.1732562887827 2024-11-25T19:29:05,224 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40775:40775),(127.0.0.1/127.0.0.1:44791:44791)] 2024-11-25T19:29:05,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T19:29:05,230 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T19:29:05,230 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:29:05,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:05,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:05,230 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-25T19:29:05,231 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T19:29:05,231 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1494365335, stopped=false 2024-11-25T19:29:05,231 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ef6ccb75414,35203,1732562887392 2024-11-25T19:29:05,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:29:05,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:29:05,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:05,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:05,232 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:29:05,232 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T19:29:05,232 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:29:05,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:05,233 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,41557,1732562887438' ***** 2024-11-25T19:29:05,233 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:29:05,233 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:29:05,233 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:29:05,233 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(3091): Received CLOSE for 0ef29e67aab69c72fb122bbdfdd77c60 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(3091): Received CLOSE for fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,41557,1732562887438 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:29:05,234 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0ef29e67aab69c72fb122bbdfdd77c60, disabling compactions & flushes 2024-11-25T19:29:05,234 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ef6ccb75414:41557. 2024-11-25T19:29:05,234 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:29:05,234 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. after waiting 0 ms 2024-11-25T19:29:05,234 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 
2024-11-25T19:29:05,234 DEBUG [RS:0;6ef6ccb75414:41557 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:29:05,234 DEBUG [RS:0;6ef6ccb75414:41557 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T19:29:05,234 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T19:29:05,237 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0->hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09-top, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b1c5e9f45571448e833b13cbad432fc9, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e685a3f048364591828d8ae6de8a4874, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/f3b3d2b9917349209cd9a28d53f1bed1, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9f95377d189043f48b5ba22e960707bc, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/2e07ebca42f44f5188828926d305fdba, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b249663050c140938e7c080fa62ef5de, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/a97d944a49ce4a898ebf5c72fbb17a50, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/90abd6e6269b4d448ed6c4da5400695f, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9715e75951df4733a4ca5f3a1b679106, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/8add8e85a32e477b8d1b2a9ba4e92038, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/43dd0b04407141e4bb2fce3a71a0d435, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/cf0cea703a914100852d06c932e8d09e, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/4ec6814f630f4bd780a3eaf796bd73b3, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/dc53759a2a404fbe9cffcf92f3370be4, 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/7d52875c0151480e8f8ed946491f7598, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/28cc856dcaf94303865d8c09bd4f87b6, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/184aa9d5e10b441f94731a3fcfe3117b, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e49a66d9398d4ef7b1db021101d69460, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/6fe8c008d5124119994dc02aad557742, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/ec4cbe70a6ee41b49c310d7b61eb6e02, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/91a6fdb79861490b97e5412228a6004a, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/0ba4b9e5c0514841b4d920d237ae62a1, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/d7142362f65f4b8ab5b2599a4bae46f2, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/75f51bb82ff84aed91c0291547d0692f] to archive 2024-11-25T19:29:05,239 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T19:29:05,240 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:29:05,241 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b1c5e9f45571448e833b13cbad432fc9 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b1c5e9f45571448e833b13cbad432fc9 2024-11-25T19:29:05,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:29:05,243 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e685a3f048364591828d8ae6de8a4874 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e685a3f048364591828d8ae6de8a4874 2024-11-25T19:29:05,244 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/f3b3d2b9917349209cd9a28d53f1bed1 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/f3b3d2b9917349209cd9a28d53f1bed1 2024-11-25T19:29:05,245 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-25T19:29:05,245 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1325): Online Regions={0ef29e67aab69c72fb122bbdfdd77c60=TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60., 1588230740=hbase:meta,,1.1588230740, fd0e73512ad398fb0fd0eb24ae1fd9ef=TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.} 2024-11-25T19:29:05,245 DEBUG [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1351): Waiting on 0ef29e67aab69c72fb122bbdfdd77c60, 1588230740, fd0e73512ad398fb0fd0eb24ae1fd9ef 2024-11-25T19:29:05,245 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & 
flushes 2024-11-25T19:29:05,245 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:29:05,245 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9f95377d189043f48b5ba22e960707bc to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9f95377d189043f48b5ba22e960707bc 2024-11-25T19:29:05,245 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:29:05,245 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:29:05,245 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:29:05,245 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-25T19:29:05,249 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/2e07ebca42f44f5188828926d305fdba to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/2e07ebca42f44f5188828926d305fdba 2024-11-25T19:29:05,250 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b249663050c140938e7c080fa62ef5de to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/b249663050c140938e7c080fa62ef5de 2024-11-25T19:29:05,251 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/a97d944a49ce4a898ebf5c72fbb17a50 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/a97d944a49ce4a898ebf5c72fbb17a50 2024-11-25T19:29:05,252 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/info/3000ef8db43149568676f22e8f7dbfa9 is 186, key is TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef./info:regioninfo/1732562911368/Put/seqid=0 2024-11-25T19:29:05,253 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/90abd6e6269b4d448ed6c4da5400695f to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/90abd6e6269b4d448ed6c4da5400695f 2024-11-25T19:29:05,255 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9715e75951df4733a4ca5f3a1b679106 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/9715e75951df4733a4ca5f3a1b679106 2024-11-25T19:29:05,256 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/8add8e85a32e477b8d1b2a9ba4e92038 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/8add8e85a32e477b8d1b2a9ba4e92038 2024-11-25T19:29:05,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741879_1055 (size=6153) 2024-11-25T19:29:05,258 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/43dd0b04407141e4bb2fce3a71a0d435 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/43dd0b04407141e4bb2fce3a71a0d435 2024-11-25T19:29:05,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741879_1055 (size=6153) 2024-11-25T19:29:05,259 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/cf0cea703a914100852d06c932e8d09e to 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/cf0cea703a914100852d06c932e8d09e 2024-11-25T19:29:05,261 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/4ec6814f630f4bd780a3eaf796bd73b3 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/4ec6814f630f4bd780a3eaf796bd73b3 2024-11-25T19:29:05,262 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/dc53759a2a404fbe9cffcf92f3370be4 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/dc53759a2a404fbe9cffcf92f3370be4 2024-11-25T19:29:05,263 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/7d52875c0151480e8f8ed946491f7598 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/7d52875c0151480e8f8ed946491f7598 2024-11-25T19:29:05,264 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/28cc856dcaf94303865d8c09bd4f87b6 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/28cc856dcaf94303865d8c09bd4f87b6 2024-11-25T19:29:05,265 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/184aa9d5e10b441f94731a3fcfe3117b to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/184aa9d5e10b441f94731a3fcfe3117b 2024-11-25T19:29:05,267 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e49a66d9398d4ef7b1db021101d69460 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/e49a66d9398d4ef7b1db021101d69460 2024-11-25T19:29:05,268 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/6fe8c008d5124119994dc02aad557742 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/6fe8c008d5124119994dc02aad557742 2024-11-25T19:29:05,272 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/info/3000ef8db43149568676f22e8f7dbfa9 2024-11-25T19:29:05,273 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/ec4cbe70a6ee41b49c310d7b61eb6e02 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/ec4cbe70a6ee41b49c310d7b61eb6e02 2024-11-25T19:29:05,274 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/91a6fdb79861490b97e5412228a6004a to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/91a6fdb79861490b97e5412228a6004a 2024-11-25T19:29:05,276 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/0ba4b9e5c0514841b4d920d237ae62a1 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/0ba4b9e5c0514841b4d920d237ae62a1 2024-11-25T19:29:05,278 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/d7142362f65f4b8ab5b2599a4bae46f2 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/d7142362f65f4b8ab5b2599a4bae46f2 2024-11-25T19:29:05,279 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/.tmp/info/3000ef8db43149568676f22e8f7dbfa9 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/info/3000ef8db43149568676f22e8f7dbfa9 2024-11-25T19:29:05,280 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/75f51bb82ff84aed91c0291547d0692f to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/info/75f51bb82ff84aed91c0291547d0692f 2024-11-25T19:29:05,281 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6ef6ccb75414:35203 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T19:29:05,281 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b1c5e9f45571448e833b13cbad432fc9=8260, e685a3f048364591828d8ae6de8a4874=12509, f3b3d2b9917349209cd9a28d53f1bed1=28855, 9f95377d189043f48b5ba22e960707bc=17894, 2e07ebca42f44f5188828926d305fdba=17894, b249663050c140938e7c080fa62ef5de=49463, a97d944a49ce4a898ebf5c72fbb17a50=12516, 90abd6e6269b4d448ed6c4da5400695f=17906, 9715e75951df4733a4ca5f3a1b679106=77734, 8add8e85a32e477b8d1b2a9ba4e92038=20078, 43dd0b04407141e4bb2fce3a71a0d435=12516, cf0cea703a914100852d06c932e8d09e=101556, 4ec6814f630f4bd780a3eaf796bd73b3=21156, dc53759a2a404fbe9cffcf92f3370be4=16828, 7d52875c0151480e8f8ed946491f7598=127666, 28cc856dcaf94303865d8c09bd4f87b6=19000, 184aa9d5e10b441f94731a3fcfe3117b=19000, e49a66d9398d4ef7b1db021101d69460=149402, 6fe8c008d5124119994dc02aad557742=12517, ec4cbe70a6ee41b49c310d7b61eb6e02=26570, 91a6fdb79861490b97e5412228a6004a=181882, 0ba4b9e5c0514841b4d920d237ae62a1=15760, d7142362f65f4b8ab5b2599a4bae46f2=12523, 75f51bb82ff84aed91c0291547d0692f=17918] 2024-11-25T19:29:05,286 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/0ef29e67aab69c72fb122bbdfdd77c60/recovered.edits/340.seqid, newMaxSeqId=340, maxSeqId=85 2024-11-25T19:29:05,286 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:29:05,286 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0ef29e67aab69c72fb122bbdfdd77c60: Waiting for close lock at 1732562945234Running coprocessor pre-close hooks at 1732562945234Disabling compacts and flushes for region at 1732562945234Disabling writes for close at 1732562945234Writing region close event to WAL at 1732562945282 (+48 ms)Running coprocessor post-close hooks at 1732562945286 (+4 ms)Closed at 1732562945286 2024-11-25T19:29:05,286 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732562910694.0ef29e67aab69c72fb122bbdfdd77c60. 2024-11-25T19:29:05,286 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fd0e73512ad398fb0fd0eb24ae1fd9ef, disabling compactions & flushes 2024-11-25T19:29:05,287 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:29:05,287 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:29:05,287 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 
after waiting 0 ms 2024-11-25T19:29:05,287 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:29:05,287 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/info/3000ef8db43149568676f22e8f7dbfa9, entries=5, sequenceid=21, filesize=6.0 K 2024-11-25T19:29:05,287 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0->hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/de88f78d5c577744ae8825b46ed080e0/info/c463738301804307ba2a8727374d8e09-bottom] to archive 2024-11-25T19:29:05,288 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=21, compaction requested=false 2024-11-25T19:29:05,288 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T19:29:05,289 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0 to hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/archive/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/info/c463738301804307ba2a8727374d8e09.de88f78d5c577744ae8825b46ed080e0 2024-11-25T19:29:05,289 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-25T19:29:05,291 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-25T19:29:05,292 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:29:05,292 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:29:05,292 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562945245Running coprocessor pre-close hooks at 1732562945245Disabling compacts and flushes for region at 1732562945245Disabling 
writes for close at 1732562945245Obtaining lock to block concurrent updates at 1732562945246 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732562945246Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=670, getHeapSize=2000, getOffHeapSize=0, getCellsCount=5 at 1732562945246Flushing stores of hbase:meta,,1.1588230740 at 1732562945246Flushing 1588230740/info: creating writer at 1732562945247 (+1 ms)Flushing 1588230740/info: appending metadata at 1732562945251 (+4 ms)Flushing 1588230740/info: closing flushed file at 1732562945251Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f9f078e: reopening flushed file at 1732562945278 (+27 ms)Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=21, compaction requested=false at 1732562945288 (+10 ms)Writing region close event to WAL at 1732562945289 (+1 ms)Running coprocessor post-close hooks at 1732562945292 (+3 ms)Closed at 1732562945292 2024-11-25T19:29:05,292 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T19:29:05,292 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/data/default/TestLogRolling-testLogRolling/fd0e73512ad398fb0fd0eb24ae1fd9ef/recovered.edits/89.seqid, newMaxSeqId=89, maxSeqId=85 2024-11-25T19:29:05,293 INFO [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:29:05,293 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fd0e73512ad398fb0fd0eb24ae1fd9ef: Waiting for close lock at 1732562945286Running coprocessor pre-close hooks at 1732562945286Disabling compacts and flushes for region at 1732562945286Disabling writes for close at 1732562945287 (+1 ms)Writing region close event to WAL at 1732562945290 (+3 ms)Running coprocessor post-close hooks at 1732562945293 (+3 ms)Closed at 1732562945293 2024-11-25T19:29:05,293 DEBUG [RS_CLOSE_REGION-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732562910694.fd0e73512ad398fb0fd0eb24ae1fd9ef. 2024-11-25T19:29:05,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:05,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:05,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:29:05,385 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T19:29:05,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-25T19:29:05,445 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,41557,1732562887438; all regions closed. 2024-11-25T19:29:05,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,446 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,446 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,446 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,446 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741834_1010 (size=8107) 2024-11-25T19:29:05,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741834_1010 (size=8107) 2024-11-25T19:29:05,450 DEBUG [RS:0;6ef6ccb75414:41557 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/oldWALs 2024-11-25T19:29:05,450 INFO [RS:0;6ef6ccb75414:41557 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C41557%2C1732562887438.meta:.meta(num 1732562888179) 2024-11-25T19:29:05,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,450 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,450 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,451 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,451 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741878_1054 (size=778) 2024-11-25T19:29:05,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741878_1054 (size=778) 2024-11-25T19:29:05,454 DEBUG [RS:0;6ef6ccb75414:41557 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/oldWALs 2024-11-25T19:29:05,454 INFO [RS:0;6ef6ccb75414:41557 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C41557%2C1732562887438:(num 1732562945176) 2024-11-25T19:29:05,454 DEBUG 
[RS:0;6ef6ccb75414:41557 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:05,454 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:29:05,454 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:29:05,454 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T19:29:05,454 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:29:05,454 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:29:05,454 INFO [RS:0;6ef6ccb75414:41557 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41557 2024-11-25T19:29:05,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:29:05,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,41557,1732562887438 2024-11-25T19:29:05,456 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:29:05,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,41557,1732562887438] 2024-11-25T19:29:05,457 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,41557,1732562887438 already deleted, retry=false 2024-11-25T19:29:05,457 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,41557,1732562887438 expired; onlineServers=0 2024-11-25T19:29:05,457 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ef6ccb75414,35203,1732562887392' ***** 2024-11-25T19:29:05,457 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T19:29:05,457 INFO [M:0;6ef6ccb75414:35203 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:29:05,457 INFO [M:0;6ef6ccb75414:35203 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:29:05,457 DEBUG [M:0;6ef6ccb75414:35203 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T19:29:05,457 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
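The two Close-WAL-Writer-0 WARN stack traces above are WAL lease-recovery attempts racing cluster teardown: the reflective isFileClosed() probe in RecoverLeaseFSUtils throws because the DFSClient behind that older filesystem handle has already been shut down ("Filesystem closed"). Stripped of the reflection (which the real utility keeps so it can run against HDFS versions that may lack isFileClosed()), the recovery loop reduces to roughly the following sketch; the class name, path argument, and timeout are illustrative, not the actual HBase code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  // Poll recoverLease()/isFileClosed() until the WAL file is closed or the deadline passes.
  public static boolean recover(Path wal, Configuration conf, long timeoutMs) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a non-HDFS filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(wal)) {
        return true; // lease recovered and the file is now closed
      }
      if (dfs.isFileClosed(wal)) {
        return true; // another client already closed it
      }
      Thread.sleep(1000L); // back off before the next probe
    }
    return false;
  }
}

When the DFS client has already been closed, both probes throw IOException("Filesystem closed"), which is exactly what surfaces above wrapped in the InvocationTargetException.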
2024-11-25T19:29:05,457 DEBUG [M:0;6ef6ccb75414:35203 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T19:29:05,457 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562887570 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562887570,5,FailOnTimeoutGroup] 2024-11-25T19:29:05,457 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562887570 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562887570,5,FailOnTimeoutGroup] 2024-11-25T19:29:05,458 INFO [M:0;6ef6ccb75414:35203 {}] hbase.ChoreService(370): Chore service for: master/6ef6ccb75414:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T19:29:05,458 INFO [M:0;6ef6ccb75414:35203 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:29:05,458 DEBUG [M:0;6ef6ccb75414:35203 {}] master.HMaster(1795): Stopping service threads 2024-11-25T19:29:05,458 INFO [M:0;6ef6ccb75414:35203 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T19:29:05,458 INFO [M:0;6ef6ccb75414:35203 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:29:05,458 INFO [M:0;6ef6ccb75414:35203 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T19:29:05,458 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T19:29:05,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T19:29:05,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:05,458 DEBUG [M:0;6ef6ccb75414:35203 {}] zookeeper.ZKUtil(347): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T19:29:05,458 WARN [M:0;6ef6ccb75414:35203 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T19:29:05,459 INFO [M:0;6ef6ccb75414:35203 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/.lastflushedseqids 2024-11-25T19:29:05,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741880_1056 (size=228) 2024-11-25T19:29:05,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741880_1056 (size=228) 2024-11-25T19:29:05,464 INFO [M:0;6ef6ccb75414:35203 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T19:29:05,464 INFO [M:0;6ef6ccb75414:35203 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T19:29:05,464 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:29:05,464 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:05,464 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:05,464 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T19:29:05,464 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:05,464 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-25T19:29:05,479 DEBUG [M:0;6ef6ccb75414:35203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78240b684b744bf6a1fa753d621ef563 is 82, key is hbase:meta,,1/info:regioninfo/1732562888208/Put/seqid=0 2024-11-25T19:29:05,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741881_1057 (size=5672) 2024-11-25T19:29:05,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741881_1057 (size=5672) 2024-11-25T19:29:05,484 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78240b684b744bf6a1fa753d621ef563 2024-11-25T19:29:05,502 DEBUG [M:0;6ef6ccb75414:35203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e1a1cd07a4174bae89f7ca5852cc6c31 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732562888648/Put/seqid=0 2024-11-25T19:29:05,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741882_1058 (size=7090) 2024-11-25T19:29:05,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741882_1058 (size=7090) 2024-11-25T19:29:05,507 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e1a1cd07a4174bae89f7ca5852cc6c31 2024-11-25T19:29:05,510 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e1a1cd07a4174bae89f7ca5852cc6c31 2024-11-25T19:29:05,523 DEBUG [M:0;6ef6ccb75414:35203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4086d63b5eff4ca899bf4e3122e078c5 is 69, key is 6ef6ccb75414,41557,1732562887438/rs:state/1732562887681/Put/seqid=0 2024-11-25T19:29:05,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741883_1059 (size=5156) 2024-11-25T19:29:05,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741883_1059 (size=5156) 2024-11-25T19:29:05,528 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4086d63b5eff4ca899bf4e3122e078c5 2024-11-25T19:29:05,548 DEBUG [M:0;6ef6ccb75414:35203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/89c94acfdc2044a7b2a5a46abcf15485 is 52, key is load_balancer_on/state:d/1732562888266/Put/seqid=0 2024-11-25T19:29:05,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741884_1060 (size=5056) 2024-11-25T19:29:05,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741884_1060 (size=5056) 2024-11-25T19:29:05,553 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/89c94acfdc2044a7b2a5a46abcf15485 2024-11-25T19:29:05,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:29:05,557 INFO [RS:0;6ef6ccb75414:41557 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:29:05,557 INFO [RS:0;6ef6ccb75414:41557 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,41557,1732562887438; zookeeper connection closed. 
2024-11-25T19:29:05,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41557-0x100785aff370001, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:29:05,557 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a5d091e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a5d091e 2024-11-25T19:29:05,557 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T19:29:05,558 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78240b684b744bf6a1fa753d621ef563 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/78240b684b744bf6a1fa753d621ef563 2024-11-25T19:29:05,563 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/78240b684b744bf6a1fa753d621ef563, entries=8, sequenceid=125, filesize=5.5 K 2024-11-25T19:29:05,564 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e1a1cd07a4174bae89f7ca5852cc6c31 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e1a1cd07a4174bae89f7ca5852cc6c31 2024-11-25T19:29:05,571 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e1a1cd07a4174bae89f7ca5852cc6c31 2024-11-25T19:29:05,571 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e1a1cd07a4174bae89f7ca5852cc6c31, entries=13, sequenceid=125, filesize=6.9 K 2024-11-25T19:29:05,573 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4086d63b5eff4ca899bf4e3122e078c5 as hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4086d63b5eff4ca899bf4e3122e078c5 2024-11-25T19:29:05,585 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4086d63b5eff4ca899bf4e3122e078c5, entries=1, sequenceid=125, filesize=5.0 K 2024-11-25T19:29:05,586 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/89c94acfdc2044a7b2a5a46abcf15485 as 
hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/89c94acfdc2044a7b2a5a46abcf15485 2024-11-25T19:29:05,591 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44093/user/jenkins/test-data/ace16f81-dfd0-54f4-c837-b2013994e291/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/89c94acfdc2044a7b2a5a46abcf15485, entries=1, sequenceid=125, filesize=4.9 K 2024-11-25T19:29:05,592 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=125, compaction requested=false 2024-11-25T19:29:05,601 INFO [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:05,601 DEBUG [M:0;6ef6ccb75414:35203 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562945464Disabling compacts and flushes for region at 1732562945464Disabling writes for close at 1732562945464Obtaining lock to block concurrent updates at 1732562945464Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732562945464Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732562945465 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732562945466 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732562945466Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732562945479 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732562945479Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732562945488 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732562945502 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732562945502Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732562945511 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732562945522 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732562945522Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732562945532 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732562945547 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732562945547Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f219955: reopening flushed file at 1732562945557 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a7c1da0: reopening flushed file at 1732562945563 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@572a1e16: reopening flushed file at 1732562945572 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a28c687: reopening flushed file at 1732562945585 (+13 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=125, compaction requested=false at 1732562945592 (+7 ms)Writing region close event to WAL at 1732562945601 (+9 ms)Closed at 1732562945601 2024-11-25T19:29:05,602 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,602 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,602 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,602 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,602 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:05,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44639 is added to blk_1073741830_1006 (size=61320) 2024-11-25T19:29:05,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37281 is added to blk_1073741830_1006 (size=61320) 2024-11-25T19:29:05,606 INFO [M:0;6ef6ccb75414:35203 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T19:29:05,606 INFO [M:0;6ef6ccb75414:35203 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35203 2024-11-25T19:29:05,606 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:29:05,606 INFO [M:0;6ef6ccb75414:35203 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:29:05,697 INFO [regionserver/6ef6ccb75414:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:29:05,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:29:05,708 INFO [M:0;6ef6ccb75414:35203 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T19:29:05,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35203-0x100785aff370000, quorum=127.0.0.1:59410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T19:29:05,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27cee48d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:29:05,710 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b29c022{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:29:05,710 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:29:05,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@314e7370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:29:05,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53298b3d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir/,STOPPED} 2024-11-25T19:29:05,711 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
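The "Region close journal" entries above (for 1588230740, fd0e73512ad398fb0fd0eb24ae1fd9ef and the master store 1595e783b53d99cd5eef43b6debb2682) pack the whole close-and-flush timeline into one string of step-at-epoch segments with occasional (+N ms) deltas. When chasing a slow close it helps to split them out; the following is a throwaway sketch with hypothetical names, not an HBase API.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalSplitter {
  // One journal step looks like "<description> at <13-digit epoch millis>",
  // optionally followed by " (+N ms)", with the next step concatenated directly after it.
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?");

  public static void print(String journal) {
    Matcher m = STEP.matcher(journal);
    long prev = -1;
    while (m.find()) {
      long ts = Long.parseLong(m.group(2));
      long delta = prev < 0 ? 0 : ts - prev;
      System.out.printf("%-80s %d (+%d ms)%n", m.group(1).trim(), ts, delta);
      prev = ts;
    }
  }
}

Run against the master-store journal above, the per-step deltas show the four column-family flushes accounting for most of the 128 ms close time.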
2024-11-25T19:29:05,712 WARN [BP-1466434562-172.17.0.2-1732562886809 heartbeating to localhost/127.0.0.1:44093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:29:05,712 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:29:05,712 WARN [BP-1466434562-172.17.0.2-1732562886809 heartbeating to localhost/127.0.0.1:44093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1466434562-172.17.0.2-1732562886809 (Datanode Uuid 3be0a97b-8343-4f9a-af4e-ed27da55a3ef) service to localhost/127.0.0.1:44093 2024-11-25T19:29:05,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data3/current/BP-1466434562-172.17.0.2-1732562886809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:29:05,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data4/current/BP-1466434562-172.17.0.2-1732562886809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:29:05,713 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:29:05,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1091e18a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:29:05,715 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3221a4aa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:29:05,715 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:29:05,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d639fc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:29:05,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fc50460{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir/,STOPPED} 2024-11-25T19:29:05,716 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
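From "stopping server 6ef6ccb75414,41557" down to "Minicluster is down" this is the standard HBaseTestingUtil teardown, and the StartMiniClusterOption echoed a few lines below (numMasters=1, numRegionServers=1, numDataNodes=2) is what the next test brings back up. The usual harness looks roughly like this sketch; the class and field names are illustrative, and only HBaseTestingUtil and StartMiniClusterOption are taken from the log itself.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Matches the option string echoed in the log: 1 master, 1 region server, 2 datanodes.
    TEST_UTIL.startMiniCluster(
        StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build());
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Drives the ordered stop seen in this log: region servers, master, datanodes, then MiniZK.
    TEST_UTIL.shutdownMiniCluster();
  }
}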
2024-11-25T19:29:05,716 WARN [BP-1466434562-172.17.0.2-1732562886809 heartbeating to localhost/127.0.0.1:44093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T19:29:05,716 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T19:29:05,716 WARN [BP-1466434562-172.17.0.2-1732562886809 heartbeating to localhost/127.0.0.1:44093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1466434562-172.17.0.2-1732562886809 (Datanode Uuid 7db64782-c815-4771-9898-bcb168e6348c) service to localhost/127.0.0.1:44093 2024-11-25T19:29:05,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data1/current/BP-1466434562-172.17.0.2-1732562886809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:29:05,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/cluster_0cfe2bdf-cbac-5acd-bb67-9d8923092911/data/data2/current/BP-1466434562-172.17.0.2-1732562886809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T19:29:05,717 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T19:29:05,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@188ddc10{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:29:05,723 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fc37f93{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T19:29:05,723 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T19:29:05,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@240fc28c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T19:29:05,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59703725{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir/,STOPPED} 2024-11-25T19:29:05,729 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T19:29:05,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T19:29:05,766 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44093 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44093 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44093 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44093 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44093 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44093 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44093 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:44093 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44093 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=137 (was 67) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5384 (was 4982) - AvailableMemoryMB LEAK? 
- 2024-11-25T19:29:05,773 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=137, ProcessCount=11, AvailableMemoryMB=5384 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.log.dir so I do NOT create it in target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40f67353-862f-69b7-c394-41230cf2c3ab/hadoop.tmp.dir so I do NOT create it in target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2, deleteOnExit=true 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/test.cache.data in system properties and HBase conf 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.log.dir in system properties and HBase conf 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T19:29:05,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T19:29:05,774 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/nfs.dump.dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/java.io.tmpdir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T19:29:05,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T19:29:05,788 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:29:05,827 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:29:05,831 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:29:05,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:29:05,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:29:05,832 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:29:05,833 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:29:05,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73e64b52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:29:05,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4eef4e65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:29:05,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21ba3242{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/java.io.tmpdir/jetty-localhost-38181-hadoop-hdfs-3_4_1-tests_jar-_-any-941515221136596651/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T19:29:05,927 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d10f4fd{HTTP/1.1, (http/1.1)}{localhost:38181} 2024-11-25T19:29:05,927 INFO [Time-limited test {}] server.Server(415): Started @296515ms 2024-11-25T19:29:05,938 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T19:29:05,975 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:29:05,977 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:29:05,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:29:05,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:29:05,978 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:29:05,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a89713d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:29:05,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52a4d688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:29:06,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1853cb8d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/java.io.tmpdir/jetty-localhost-32975-hadoop-hdfs-3_4_1-tests_jar-_-any-17000914277128998109/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:29:06,075 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@758e212c{HTTP/1.1, (http/1.1)}{localhost:32975} 2024-11-25T19:29:06,075 INFO [Time-limited test {}] server.Server(415): Started @296663ms 2024-11-25T19:29:06,076 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T19:29:06,106 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T19:29:06,111 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T19:29:06,112 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T19:29:06,112 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T19:29:06,112 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T19:29:06,112 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1391b748{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.log.dir/,AVAILABLE} 2024-11-25T19:29:06,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22748d48{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T19:29:06,142 WARN [Thread-2466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data1/current/BP-888033461-172.17.0.2-1732562945791/current, will proceed with Du for space computation calculation, 2024-11-25T19:29:06,142 WARN [Thread-2467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data2/current/BP-888033461-172.17.0.2-1732562945791/current, will proceed with Du for space computation calculation, 2024-11-25T19:29:06,162 WARN [Thread-2445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T19:29:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x72f5040981cb7b71 with lease ID 0x21071ea8e521018e: Processing first storage report for DS-aede1cd7-3eb0-41d2-a4c1-53d0bda56aae from datanode DatanodeRegistration(127.0.0.1:40851, datanodeUuid=5b44762b-3672-4d8b-b35b-87b16963191e, infoPort=40113, infoSecurePort=0, ipcPort=37555, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791) 2024-11-25T19:29:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72f5040981cb7b71 with lease ID 0x21071ea8e521018e: from storage DS-aede1cd7-3eb0-41d2-a4c1-53d0bda56aae node DatanodeRegistration(127.0.0.1:40851, datanodeUuid=5b44762b-3672-4d8b-b35b-87b16963191e, infoPort=40113, infoSecurePort=0, ipcPort=37555, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T19:29:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x72f5040981cb7b71 with lease ID 0x21071ea8e521018e: Processing first storage report for DS-dfe30627-7923-4434-9a86-234708048789 from datanode DatanodeRegistration(127.0.0.1:40851, datanodeUuid=5b44762b-3672-4d8b-b35b-87b16963191e, infoPort=40113, infoSecurePort=0, ipcPort=37555, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791) 2024-11-25T19:29:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72f5040981cb7b71 with lease ID 0x21071ea8e521018e: from storage DS-dfe30627-7923-4434-9a86-234708048789 node DatanodeRegistration(127.0.0.1:40851, datanodeUuid=5b44762b-3672-4d8b-b35b-87b16963191e, infoPort=40113, infoSecurePort=0, ipcPort=37555, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:29:06,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@589971c8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/java.io.tmpdir/jetty-localhost-41023-hadoop-hdfs-3_4_1-tests_jar-_-any-11669043111849153118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T19:29:06,215 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4548a64b{HTTP/1.1, (http/1.1)}{localhost:41023} 2024-11-25T19:29:06,215 INFO [Time-limited test {}] server.Server(415): Started @296803ms 2024-11-25T19:29:06,216 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
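The option dump at the top of this section (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false) is what drives the DFS/ZK/HBase startup sequence logged above. A minimal, hypothetical sketch of how a test might request that topology, assuming the StartMiniClusterOption builder exposes setters matching the field names in the dump and that HBaseTestingUtil.startMiniCluster accepts the option; this is not code taken from TestLogRolling itself.

// Hypothetical sketch only: assumes builder setters match the option field
// names printed in the log (numMasters, numRegionServers, numDataNodes,
// numZkServers, createRootDir, createWALDir).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)         // one HMaster, as in the logged option
        .numRegionServers(1)   // one RegionServer
        .numDataNodes(2)       // two HDFS DataNodes backing the mini DFS
        .numZkServers(1)       // one MiniZooKeeperCluster node
        .createRootDir(false)  // let the util create hbase.rootdir itself
        .createWALDir(false)
        .build();
    util.startMiniCluster(option); // brings up DFS, ZooKeeper, master and regionserver
    try {
      // test body would run against the started cluster here
    } finally {
      util.shutdownMiniCluster(); // tears the cluster down and cleans temp dirs
    }
  }
}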
2024-11-25T19:29:06,282 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data3/current/BP-888033461-172.17.0.2-1732562945791/current, will proceed with Du for space computation calculation, 2024-11-25T19:29:06,282 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data4/current/BP-888033461-172.17.0.2-1732562945791/current, will proceed with Du for space computation calculation, 2024-11-25T19:29:06,300 WARN [Thread-2481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T19:29:06,302 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x353b6fcff27fd06f with lease ID 0x21071ea8e521018f: Processing first storage report for DS-170b1148-64a9-4240-a79c-8c6b0c2ca4cf from datanode DatanodeRegistration(127.0.0.1:32959, datanodeUuid=50d31d56-3c1e-468c-8f6f-5362cf07e486, infoPort=35587, infoSecurePort=0, ipcPort=35803, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791) 2024-11-25T19:29:06,302 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x353b6fcff27fd06f with lease ID 0x21071ea8e521018f: from storage DS-170b1148-64a9-4240-a79c-8c6b0c2ca4cf node DatanodeRegistration(127.0.0.1:32959, datanodeUuid=50d31d56-3c1e-468c-8f6f-5362cf07e486, infoPort=35587, infoSecurePort=0, ipcPort=35803, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:29:06,302 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x353b6fcff27fd06f with lease ID 0x21071ea8e521018f: Processing first storage report for DS-29bf2dbe-1b2f-4d9e-87c1-93440159e0b0 from datanode DatanodeRegistration(127.0.0.1:32959, datanodeUuid=50d31d56-3c1e-468c-8f6f-5362cf07e486, infoPort=35587, infoSecurePort=0, ipcPort=35803, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791) 2024-11-25T19:29:06,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x353b6fcff27fd06f with lease ID 0x21071ea8e521018f: from storage DS-29bf2dbe-1b2f-4d9e-87c1-93440159e0b0 node DatanodeRegistration(127.0.0.1:32959, datanodeUuid=50d31d56-3c1e-468c-8f6f-5362cf07e486, infoPort=35587, infoSecurePort=0, ipcPort=35803, storageInfo=lv=-57;cid=testClusterID;nsid=1978282833;c=1732562945791), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T19:29:06,349 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7 2024-11-25T19:29:06,353 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/zookeeper_0, clientPort=58626, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T19:29:06,356 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58626 2024-11-25T19:29:06,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:06,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:06,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:06,358 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:06,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:29:06,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741825_1001 (size=7) 2024-11-25T19:29:06,372 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94 with version=8 2024-11-25T19:29:06,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35499/user/jenkins/test-data/ba830e48-4a04-b9cc-5b94-44bbe37fb678/hbase-staging 2024-11-25T19:29:06,374 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:29:06,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:29:06,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:29:06,374 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:29:06,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:29:06,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:29:06,374 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T19:29:06,374 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:29:06,375 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38415 2024-11-25T19:29:06,376 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38415 connecting to ZooKeeper ensemble=127.0.0.1:58626 2024-11-25T19:29:06,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:384150x0, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:29:06,381 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38415-0x100785be59d0000 connected 2024-11-25T19:29:06,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:06,398 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:06,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:29:06,402 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94, hbase.cluster.distributed=false 2024-11-25T19:29:06,403 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:29:06,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38415 2024-11-25T19:29:06,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38415 2024-11-25T19:29:06,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38415 2024-11-25T19:29:06,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38415 2024-11-25T19:29:06,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38415 2024-11-25T19:29:06,423 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ef6ccb75414:0 server-side Connection retries=45 2024-11-25T19:29:06,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:29:06,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T19:29:06,423 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T19:29:06,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T19:29:06,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T19:29:06,423 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T19:29:06,423 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T19:29:06,424 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34863 2024-11-25T19:29:06,425 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34863 connecting to ZooKeeper ensemble=127.0.0.1:58626 
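Both the master (bound to port 38415) and the regionserver (bound to port 34863) register against the ZooKeeper ensemble at 127.0.0.1:58626 that the MiniZooKeeperCluster opened earlier. For orientation, a minimal client-side sketch of pointing a Connection at such an ensemble; the port below is the ephemeral one from this particular run and is hard-coded purely for illustration, since a real test would reuse the testing util's own Configuration.

// Hypothetical client snippet: connects to the quorum/clientPort logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 58626); // ephemeral port from this run
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // ClusterMetrics reflects what the active master registers under /hbase in ZK
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}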
2024-11-25T19:29:06,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:06,427 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:06,430 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348630x0, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T19:29:06,430 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:348630x0, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:29:06,430 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34863-0x100785be59d0001 connected 2024-11-25T19:29:06,431 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T19:29:06,433 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T19:29:06,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T19:29:06,435 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T19:29:06,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34863 2024-11-25T19:29:06,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34863 2024-11-25T19:29:06,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34863 2024-11-25T19:29:06,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34863 2024-11-25T19:29:06,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34863 2024-11-25T19:29:06,451 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ef6ccb75414:38415 2024-11-25T19:29:06,451 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:06,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:29:06,452 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:29:06,453 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): 
master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:06,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,454 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T19:29:06,454 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ef6ccb75414,38415,1732562946373 from backup master directory 2024-11-25T19:29:06,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:06,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:29:06,455 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:29:06,455 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:06,455 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T19:29:06,455 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,455 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T19:29:06,464 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/hbase.id] with ID: 9bb563fd-eaf2-4fdd-a9b6-6c2235a4ff7a 2024-11-25T19:29:06,465 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/.tmp/hbase.id 2024-11-25T19:29:06,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:29:06,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741826_1002 (size=42) 2024-11-25T19:29:06,472 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location 
[hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/.tmp/hbase.id]:[hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/hbase.id] 2024-11-25T19:29:06,482 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:06,483 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T19:29:06,484 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-25T19:29:06,485 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:29:06,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741827_1003 (size=196) 2024-11-25T19:29:06,894 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T19:29:06,895 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T19:29:06,896 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:29:06,907 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:29:06,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741828_1004 (size=1189) 2024-11-25T19:29:06,908 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store 2024-11-25T19:29:06,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:29:06,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741829_1005 (size=34) 2024-11-25T19:29:06,919 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:29:06,919 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T19:29:06,919 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:06,919 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:06,919 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-25T19:29:06,919 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:06,919 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T19:29:06,919 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732562946919Disabling compacts and flushes for region at 1732562946919Disabling writes for close at 1732562946919Writing region close event to WAL at 1732562946919Closed at 1732562946919 2024-11-25T19:29:06,920 WARN [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/.initializing 2024-11-25T19:29:06,920 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/WALs/6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:06,930 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C38415%2C1732562946373, suffix=, logDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/WALs/6ef6ccb75414,38415,1732562946373, archiveDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/oldWALs, maxLogs=10 2024-11-25T19:29:06,930 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C38415%2C1732562946373.1732562946930 2024-11-25T19:29:06,938 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/WALs/6ef6ccb75414,38415,1732562946373/6ef6ccb75414%2C38415%2C1732562946373.1732562946930 2024-11-25T19:29:06,945 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40113:40113),(127.0.0.1/127.0.0.1:35587:35587)] 2024-11-25T19:29:06,948 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:29:06,949 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:29:06,949 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,949 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T19:29:06,953 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:06,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:06,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T19:29:06,955 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:06,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:29:06,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T19:29:06,957 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:06,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:29:06,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T19:29:06,958 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:06,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T19:29:06,959 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,960 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,960 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,961 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,961 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,961 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T19:29:06,962 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T19:29:06,965 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:29:06,966 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867792, jitterRate=0.10345514118671417}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T19:29:06,966 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732562946949Initializing all the Stores at 1732562946950 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562946950Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562946951 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562946951Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562946951Cleaning up temporary data from old regions at 1732562946961 (+10 ms)Region opened successfully at 1732562946966 (+5 ms) 2024-11-25T19:29:06,967 INFO [master/6ef6ccb75414:0:becomeActiveMaster 
{}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T19:29:06,970 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28fb325e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:29:06,971 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T19:29:06,971 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T19:29:06,971 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T19:29:06,971 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T19:29:06,971 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T19:29:06,972 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T19:29:06,972 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T19:29:06,975 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
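The 'master:store' descriptor logged above spells out the 'info', 'proc', 'rs' and 'state' families attribute by attribute. A hedged sketch of expressing the 'info' family with the public descriptor builders, purely to map the printed attributes (VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER, BLOCKSIZE) onto builder calls; MasterRegion constructs this descriptor internally, so the snippet is illustrative only.

// Illustrative only: mirrors the logged 'info' family attributes of master:store
// using the public ColumnFamilyDescriptorBuilder / TableDescriptorBuilder API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))      // table 'master:store'
        .setColumnFamily(info)
        .build();
  }
}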
2024-11-25T19:29:06,976 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T19:29:06,977 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T19:29:06,977 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T19:29:06,978 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T19:29:06,979 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T19:29:06,980 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T19:29:06,982 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T19:29:06,982 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T19:29:06,983 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T19:29:06,984 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T19:29:06,986 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T19:29:06,987 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T19:29:06,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:29:06,988 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T19:29:06,988 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-25T19:29:06,989 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ef6ccb75414,38415,1732562946373, sessionid=0x100785be59d0000, setting cluster-up flag (Was=false) 2024-11-25T19:29:06,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,991 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,994 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T19:29:06,995 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:06,997 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:06,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:07,000 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T19:29:07,001 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:07,002 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T19:29:07,006 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T19:29:07,007 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T19:29:07,007 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-25T19:29:07,007 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ef6ccb75414,38415,1732562946373 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T19:29:07,008 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:29:07,008 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:29:07,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:29:07,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=5, maxPoolSize=5 2024-11-25T19:29:07,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ef6ccb75414:0, corePoolSize=10, maxPoolSize=10 2024-11-25T19:29:07,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:29:07,009 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,012 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:29:07,012 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T19:29:07,013 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,013 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T19:29:07,017 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732562977017 2024-11-25T19:29:07,017 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T19:29:07,017 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T19:29:07,017 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T19:29:07,017 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T19:29:07,017 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T19:29:07,017 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T19:29:07,021 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T19:29:07,022 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T19:29:07,022 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T19:29:07,022 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T19:29:07,025 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T19:29:07,025 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T19:29:07,029 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562947025,5,FailOnTimeoutGroup] 2024-11-25T19:29:07,029 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562947029,5,FailOnTimeoutGroup] 2024-11-25T19:29:07,029 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,029 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T19:29:07,029 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,030 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-25T19:29:07,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:29:07,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741831_1007 (size=1321) 2024-11-25T19:29:07,036 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T19:29:07,036 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94 2024-11-25T19:29:07,046 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(746): ClusterId : 9bb563fd-eaf2-4fdd-a9b6-6c2235a4ff7a 2024-11-25T19:29:07,046 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T19:29:07,048 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T19:29:07,049 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T19:29:07,050 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T19:29:07,051 DEBUG [RS:0;6ef6ccb75414:34863 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63016c9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ef6ccb75414/172.17.0.2:0 2024-11-25T19:29:07,062 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:29:07,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741832_1008 (size=32) 2024-11-25T19:29:07,063 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:29:07,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:29:07,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:29:07,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:29:07,068 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:29:07,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:29:07,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:29:07,071 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ef6ccb75414:34863 2024-11-25T19:29:07,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,071 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T19:29:07,071 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T19:29:07,071 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T19:29:07,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:29:07,073 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ef6ccb75414,38415,1732562946373 with port=34863, startcode=1732562946422 2024-11-25T19:29:07,073 DEBUG [RS:0;6ef6ccb75414:34863 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T19:29:07,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:29:07,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:29:07,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740 2024-11-25T19:29:07,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740 2024-11-25T19:29:07,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:29:07,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:29:07,078 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33181, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T19:29:07,078 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T19:29:07,078 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38415 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,079 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38415 {}] master.ServerManager(517): Registering regionserver=6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:29:07,080 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94 2024-11-25T19:29:07,080 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43475 2024-11-25T19:29:07,080 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T19:29:07,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:29:07,082 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T19:29:07,083 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712111, jitterRate=-0.09450514614582062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:29:07,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732562947063Initializing all the Stores at 1732562947064 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562947064Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562947064Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562947064Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 
1732562947064Cleaning up temporary data from old regions at 1732562947078 (+14 ms)Region opened successfully at 1732562947084 (+6 ms) 2024-11-25T19:29:07,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:29:07,084 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:29:07,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:29:07,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:29:07,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:29:07,085 DEBUG [RS:0;6ef6ccb75414:34863 {}] zookeeper.ZKUtil(111): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,085 WARN [RS:0;6ef6ccb75414:34863 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T19:29:07,086 INFO [RS:0;6ef6ccb75414:34863 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:29:07,086 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,090 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:29:07,090 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562947084Disabling compacts and flushes for region at 1732562947084Disabling writes for close at 1732562947084Writing region close event to WAL at 1732562947090 (+6 ms)Closed at 1732562947090 2024-11-25T19:29:07,090 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ef6ccb75414,34863,1732562946422] 2024-11-25T19:29:07,091 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:29:07,091 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T19:29:07,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T19:29:07,093 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:29:07,094 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T19:29:07,094 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T19:29:07,097 INFO 
[RS:0;6ef6ccb75414:34863 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T19:29:07,098 INFO [RS:0;6ef6ccb75414:34863 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T19:29:07,098 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,098 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T19:29:07,099 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T19:29:07,099 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,099 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ef6ccb75414:0, corePoolSize=2, maxPoolSize=2 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ef6ccb75414:0, corePoolSize=1, maxPoolSize=1 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:29:07,100 DEBUG [RS:0;6ef6ccb75414:34863 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ef6ccb75414:0, corePoolSize=3, maxPoolSize=3 2024-11-25T19:29:07,105 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,105 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,105 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,105 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,105 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,105 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34863,1732562946422-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:29:07,125 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T19:29:07,125 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,34863,1732562946422-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,126 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,126 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.Replication(171): 6ef6ccb75414,34863,1732562946422 started 2024-11-25T19:29:07,146 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T19:29:07,146 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1482): Serving as 6ef6ccb75414,34863,1732562946422, RpcServer on 6ef6ccb75414/172.17.0.2:34863, sessionid=0x100785be59d0001 2024-11-25T19:29:07,146 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T19:29:07,146 DEBUG [RS:0;6ef6ccb75414:34863 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,146 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,34863,1732562946422' 2024-11-25T19:29:07,146 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T19:29:07,147 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T19:29:07,147 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T19:29:07,147 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T19:29:07,147 DEBUG [RS:0;6ef6ccb75414:34863 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,147 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ef6ccb75414,34863,1732562946422' 2024-11-25T19:29:07,147 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T19:29:07,148 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T19:29:07,148 DEBUG [RS:0;6ef6ccb75414:34863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T19:29:07,148 INFO [RS:0;6ef6ccb75414:34863 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T19:29:07,148 INFO [RS:0;6ef6ccb75414:34863 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T19:29:07,245 WARN [6ef6ccb75414:38415 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-25T19:29:07,250 INFO [RS:0;6ef6ccb75414:34863 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C34863%2C1732562946422, suffix=, logDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/6ef6ccb75414,34863,1732562946422, archiveDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/oldWALs, maxLogs=32 2024-11-25T19:29:07,251 INFO [RS:0;6ef6ccb75414:34863 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C34863%2C1732562946422.1732562947251 2024-11-25T19:29:07,262 INFO [RS:0;6ef6ccb75414:34863 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/6ef6ccb75414,34863,1732562946422/6ef6ccb75414%2C34863%2C1732562946422.1732562947251 2024-11-25T19:29:07,270 DEBUG [RS:0;6ef6ccb75414:34863 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40113:40113),(127.0.0.1/127.0.0.1:35587:35587)] 2024-11-25T19:29:07,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:07,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T19:29:07,495 DEBUG [6ef6ccb75414:38415 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T19:29:07,496 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,497 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,34863,1732562946422, state=OPENING 2024-11-25T19:29:07,498 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T19:29:07,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:07,499 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:07,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:29:07,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:29:07,499 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T19:29:07,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,34863,1732562946422}] 2024-11-25T19:29:07,651 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T19:29:07,654 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54911, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T19:29:07,657 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T19:29:07,657 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:29:07,659 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ef6ccb75414%2C34863%2C1732562946422.meta, suffix=.meta, logDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/6ef6ccb75414,34863,1732562946422, archiveDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/oldWALs, maxLogs=32 2024-11-25T19:29:07,659 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ef6ccb75414%2C34863%2C1732562946422.meta.1732562947659.meta 2024-11-25T19:29:07,673 INFO 
[RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/6ef6ccb75414,34863,1732562946422/6ef6ccb75414%2C34863%2C1732562946422.meta.1732562947659.meta 2024-11-25T19:29:07,680 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40113:40113),(127.0.0.1/127.0.0.1:35587:35587)] 2024-11-25T19:29:07,691 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T19:29:07,691 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T19:29:07,691 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T19:29:07,691 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-25T19:29:07,691 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T19:29:07,691 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T19:29:07,691 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T19:29:07,691 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T19:29:07,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T19:29:07,694 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T19:29:07,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T19:29:07,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T19:29:07,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T19:29:07,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T19:29:07,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,698 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T19:29:07,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T19:29:07,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T19:29:07,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T19:29:07,700 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T19:29:07,700 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740 2024-11-25T19:29:07,701 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740 2024-11-25T19:29:07,703 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T19:29:07,703 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T19:29:07,704 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T19:29:07,705 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T19:29:07,706 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755839, jitterRate=-0.03890193998813629}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T19:29:07,706 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T19:29:07,706 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732562947692Writing region info on filesystem at 1732562947692Initializing all the Stores at 1732562947693 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562947693Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562947693Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732562947693Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732562947693Cleaning up temporary data from old regions at 1732562947703 (+10 ms)Running coprocessor post-open hooks at 1732562947706 (+3 ms)Region opened successfully at 1732562947706 2024-11-25T19:29:07,707 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732562947651 2024-11-25T19:29:07,709 DEBUG [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T19:29:07,710 INFO [RS_OPEN_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T19:29:07,710 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,711 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ef6ccb75414,34863,1732562946422, state=OPEN 2024-11-25T19:29:07,713 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:29:07,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T19:29:07,713 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:07,713 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:29:07,714 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T19:29:07,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T19:29:07,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ef6ccb75414,34863,1732562946422 in 213 msec 2024-11-25T19:29:07,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T19:29:07,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 625 msec 2024-11-25T19:29:07,720 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T19:29:07,720 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T19:29:07,721 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:29:07,721 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,34863,1732562946422, seqNum=-1] 2024-11-25T19:29:07,722 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:29:07,723 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52473, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:29:07,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 722 msec 2024-11-25T19:29:07,730 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732562947729, completionTime=-1 2024-11-25T19:29:07,730 INFO 
[master/6ef6ccb75414:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T19:29:07,730 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T19:29:07,732 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T19:29:07,732 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732563007732 2024-11-25T19:29:07,732 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732563067732 2024-11-25T19:29:07,732 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T19:29:07,733 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38415,1732562946373-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,733 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38415,1732562946373-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,733 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38415,1732562946373-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,733 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ef6ccb75414:38415, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,733 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,735 DEBUG [master/6ef6ccb75414:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T19:29:07,737 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.286sec 2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38415,1732562946373-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T19:29:07,742 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38415,1732562946373-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T19:29:07,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@186b2d16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:29:07,752 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ef6ccb75414,38415,-1 for getting cluster id 2024-11-25T19:29:07,752 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T19:29:07,753 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T19:29:07,753 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T19:29:07,753 INFO [master/6ef6ccb75414:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ef6ccb75414,38415,1732562946373-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T19:29:07,754 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9bb563fd-eaf2-4fdd-a9b6-6c2235a4ff7a' 2024-11-25T19:29:07,754 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T19:29:07,754 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9bb563fd-eaf2-4fdd-a9b6-6c2235a4ff7a" 2024-11-25T19:29:07,755 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f9b706a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:29:07,755 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ef6ccb75414,38415,-1] 2024-11-25T19:29:07,755 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T19:29:07,756 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:07,758 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T19:29:07,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@115f614b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T19:29:07,759 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T19:29:07,760 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ef6ccb75414,34863,1732562946422, seqNum=-1] 2024-11-25T19:29:07,761 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T19:29:07,762 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37968, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T19:29:07,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:07,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T19:29:07,768 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T19:29:07,768 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T19:29:07,771 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/test.com,8080,1, archiveDir=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/oldWALs, maxLogs=32 2024-11-25T19:29:07,773 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732562947773 2024-11-25T19:29:07,791 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/test.com,8080,1/test.com%2C8080%2C1.1732562947773 2024-11-25T19:29:07,797 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35587:35587),(127.0.0.1/127.0.0.1:40113:40113)] 2024-11-25T19:29:07,802 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732562947802 2024-11-25T19:29:07,814 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,814 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,814 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,814 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,815 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/test.com,8080,1/test.com%2C8080%2C1.1732562947773 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/test.com,8080,1/test.com%2C8080%2C1.1732562947802 2024-11-25T19:29:07,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741835_1011 (size=93) 2024-11-25T19:29:07,819 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741835_1011 (size=93) 2024-11-25T19:29:07,822 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35587:35587),(127.0.0.1/127.0.0.1:40113:40113)] 2024-11-25T19:29:07,822 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/test.com,8080,1/test.com%2C8080%2C1.1732562947773 is not closed yet, will try archiving it next time 2024-11-25T19:29:07,824 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,824 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,824 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,824 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,824 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741836_1012 (size=93) 2024-11-25T19:29:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741836_1012 (size=93) 2024-11-25T19:29:08,220 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/WALs/test.com,8080,1/test.com%2C8080%2C1.1732562947773 to hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/oldWALs/test.com%2C8080%2C1.1732562947773 2024-11-25T19:29:08,223 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/oldWALs 2024-11-25T19:29:08,223 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732562947802) 2024-11-25T19:29:08,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T19:29:08,223 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T19:29:08,223 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:29:08,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:08,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:08,223 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-25T19:29:08,223 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T19:29:08,223 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=395890905, stopped=false 2024-11-25T19:29:08,223 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ef6ccb75414,38415,1732562946373 2024-11-25T19:29:08,224 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:29:08,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T19:29:08,224 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:08,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T19:29:08,224 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T19:29:08,225 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T19:29:08,225 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:29:08,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:08,225 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:29:08,225 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ef6ccb75414,34863,1732562946422' ***** 2024-11-25T19:29:08,225 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T19:29:08,225 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T19:29:08,225 INFO [RS:0;6ef6ccb75414:34863 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T19:29:08,225 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T19:29:08,225 INFO [RS:0;6ef6ccb75414:34863 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T19:29:08,225 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(959): stopping server 6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:08,225 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:29:08,225 INFO [RS:0;6ef6ccb75414:34863 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ef6ccb75414:34863. 
2024-11-25T19:29:08,225 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T19:29:08,225 DEBUG [RS:0;6ef6ccb75414:34863 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T19:29:08,225 DEBUG [RS:0;6ef6ccb75414:34863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:08,226 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T19:29:08,226 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T19:29:08,226 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T19:29:08,226 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T19:29:08,226 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T19:29:08,226 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-25T19:29:08,226 DEBUG [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T19:29:08,226 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T19:29:08,226 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T19:29:08,226 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T19:29:08,226 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T19:29:08,226 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T19:29:08,226 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-25T19:29:08,241 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740/.tmp/ns/94876fe623484f21be625c1e076ee90f is 43, key is default/ns:d/1732562947724/Put/seqid=0 2024-11-25T19:29:08,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741837_1013 (size=5153) 2024-11-25T19:29:08,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741837_1013 (size=5153) 2024-11-25T19:29:08,246 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740/.tmp/ns/94876fe623484f21be625c1e076ee90f 2024-11-25T19:29:08,251 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740/.tmp/ns/94876fe623484f21be625c1e076ee90f as hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740/ns/94876fe623484f21be625c1e076ee90f 2024-11-25T19:29:08,255 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740/ns/94876fe623484f21be625c1e076ee90f, entries=2, sequenceid=6, filesize=5.0 K 2024-11-25T19:29:08,256 INFO 
[RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-25T19:29:08,256 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T19:29:08,260 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-25T19:29:08,260 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T19:29:08,261 INFO [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T19:29:08,261 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732562948226Running coprocessor pre-close hooks at 1732562948226Disabling compacts and flushes for region at 1732562948226Disabling writes for close at 1732562948226Obtaining lock to block concurrent updates at 1732562948226Preparing flush snapshotting stores in 1588230740 at 1732562948226Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732562948226Flushing stores of hbase:meta,,1.1588230740 at 1732562948227 (+1 ms)Flushing 1588230740/ns: creating writer at 1732562948227Flushing 1588230740/ns: appending metadata at 1732562948241 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732562948241Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d77e788: reopening flushed file at 1732562948250 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1732562948256 (+6 ms)Writing region close event to WAL at 1732562948257 (+1 ms)Running coprocessor post-close hooks at 1732562948260 (+3 ms)Closed at 1732562948260 2024-11-25T19:29:08,261 DEBUG [RS_CLOSE_META-regionserver/6ef6ccb75414:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T19:29:08,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,38981,1732562756052/6ef6ccb75414%2C38981%2C1732562756052.meta.1732562757075.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:08,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40559/user/jenkins/test-data/ab4aba43-cd62-1326-59bd-5ecb93d3f555/WALs/6ef6ccb75414,37563,1732562757286/6ef6ccb75414%2C37563%2C1732562757286.1732562757484 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T19:29:08,426 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(976): stopping server 6ef6ccb75414,34863,1732562946422; all regions closed. 2024-11-25T19:29:08,427 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,427 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,427 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,427 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,427 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741834_1010 (size=1152) 2024-11-25T19:29:08,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741834_1010 (size=1152) 2024-11-25T19:29:08,431 DEBUG [RS:0;6ef6ccb75414:34863 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/oldWALs 2024-11-25T19:29:08,431 INFO [RS:0;6ef6ccb75414:34863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C34863%2C1732562946422.meta:.meta(num 1732562947659) 2024-11-25T19:29:08,431 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,431 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,431 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,432 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,432 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T19:29:08,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741833_1009 (size=93) 2024-11-25T19:29:08,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741833_1009 (size=93) 2024-11-25T19:29:08,435 DEBUG [RS:0;6ef6ccb75414:34863 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/oldWALs 2024-11-25T19:29:08,435 INFO [RS:0;6ef6ccb75414:34863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ef6ccb75414%2C34863%2C1732562946422:(num 1732562947251) 2024-11-25T19:29:08,435 DEBUG [RS:0;6ef6ccb75414:34863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T19:29:08,435 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T19:29:08,435 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:29:08,435 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.ChoreService(370): Chore service for: regionserver/6ef6ccb75414:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, 
unit=MILLISECONDS] on shutdown 2024-11-25T19:29:08,436 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T19:29:08,436 INFO [regionserver/6ef6ccb75414:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T19:29:08,436 INFO [RS:0;6ef6ccb75414:34863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34863 2024-11-25T19:29:08,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T19:29:08,437 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ef6ccb75414,34863,1732562946422 2024-11-25T19:29:08,437 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T19:29:08,438 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ef6ccb75414,34863,1732562946422] 2024-11-25T19:29:08,438 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ef6ccb75414,34863,1732562946422 already deleted, retry=false 2024-11-25T19:29:08,438 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ef6ccb75414,34863,1732562946422 expired; onlineServers=0 2024-11-25T19:29:08,438 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ef6ccb75414,38415,1732562946373' ***** 2024-11-25T19:29:08,438 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T19:29:08,438 INFO [M:0;6ef6ccb75414:38415 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T19:29:08,439 INFO [M:0;6ef6ccb75414:38415 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T19:29:08,439 DEBUG [M:0;6ef6ccb75414:38415 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T19:29:08,439 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T19:29:08,439 DEBUG [M:0;6ef6ccb75414:38415 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-25T19:29:08,439 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562947029 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.small.0-1732562947029,5,FailOnTimeoutGroup]
2024-11-25T19:29:08,439 DEBUG [master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562947025 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ef6ccb75414:0:becomeActiveMaster-HFileCleaner.large.0-1732562947025,5,FailOnTimeoutGroup]
2024-11-25T19:29:08,439 INFO [M:0;6ef6ccb75414:38415 {}] hbase.ChoreService(370): Chore service for: master/6ef6ccb75414:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-25T19:29:08,439 INFO [M:0;6ef6ccb75414:38415 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-25T19:29:08,439 DEBUG [M:0;6ef6ccb75414:38415 {}] master.HMaster(1795): Stopping service threads
2024-11-25T19:29:08,439 INFO [M:0;6ef6ccb75414:38415 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-25T19:29:08,439 INFO [M:0;6ef6ccb75414:38415 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-25T19:29:08,439 INFO [M:0;6ef6ccb75414:38415 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-25T19:29:08,439 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-25T19:29:08,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-25T19:29:08,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-25T19:29:08,440 DEBUG [M:0;6ef6ccb75414:38415 {}] zookeeper.ZKUtil(347): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-25T19:29:08,440 WARN [M:0;6ef6ccb75414:38415 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-25T19:29:08,440 INFO [M:0;6ef6ccb75414:38415 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/.lastflushedseqids
2024-11-25T19:29:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741838_1014 (size=99)
2024-11-25T19:29:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741838_1014 (size=99)
2024-11-25T19:29:08,451 INFO [M:0;6ef6ccb75414:38415 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-25T19:29:08,451 INFO [M:0;6ef6ccb75414:38415 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-25T19:29:08,451 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-25T19:29:08,451 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T19:29:08,451 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T19:29:08,451 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-25T19:29:08,451 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T19:29:08,451 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-11-25T19:29:08,468 DEBUG [M:0;6ef6ccb75414:38415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13d7d7e6859746c18c436e307338f13b is 82, key is hbase:meta,,1/info:regioninfo/1732562947710/Put/seqid=0
2024-11-25T19:29:08,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741839_1015 (size=5672)
2024-11-25T19:29:08,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741839_1015 (size=5672)
2024-11-25T19:29:08,472 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13d7d7e6859746c18c436e307338f13b
2024-11-25T19:29:08,493 DEBUG [M:0;6ef6ccb75414:38415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e93d82eefb804fba9cb3b84fd18edc22 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732562947729/Put/seqid=0
2024-11-25T19:29:08,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741840_1016 (size=5275)
2024-11-25T19:29:08,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741840_1016 (size=5275)
2024-11-25T19:29:08,498 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e93d82eefb804fba9cb3b84fd18edc22
2024-11-25T19:29:08,519 DEBUG [M:0;6ef6ccb75414:38415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8c100c1c2d34fb6b992765e0d255a8f is 69, key is 6ef6ccb75414,34863,1732562946422/rs:state/1732562947079/Put/seqid=0
2024-11-25T19:29:08,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741841_1017 (size=5156)
2024-11-25T19:29:08,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741841_1017 (size=5156)
2024-11-25T19:29:08,524 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8c100c1c2d34fb6b992765e0d255a8f
2024-11-25T19:29:08,538 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T19:29:08,538 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34863-0x100785be59d0001, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T19:29:08,538 INFO [RS:0;6ef6ccb75414:34863 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-25T19:29:08,538 INFO [RS:0;6ef6ccb75414:34863 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ef6ccb75414,34863,1732562946422; zookeeper connection closed.
2024-11-25T19:29:08,539 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@456eca71 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@456eca71
2024-11-25T19:29:08,539 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-25T19:29:08,548 DEBUG [M:0;6ef6ccb75414:38415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eeccef303ff4b9f82d47fff1dbc0241 is 52, key is load_balancer_on/state:d/1732562947767/Put/seqid=0
2024-11-25T19:29:08,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741842_1018 (size=5056)
2024-11-25T19:29:08,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741842_1018 (size=5056)
2024-11-25T19:29:08,560 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eeccef303ff4b9f82d47fff1dbc0241
2024-11-25T19:29:08,566 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13d7d7e6859746c18c436e307338f13b as hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/13d7d7e6859746c18c436e307338f13b
2024-11-25T19:29:08,572 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/13d7d7e6859746c18c436e307338f13b, entries=8, sequenceid=29, filesize=5.5 K
2024-11-25T19:29:08,573 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e93d82eefb804fba9cb3b84fd18edc22 as hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e93d82eefb804fba9cb3b84fd18edc22
2024-11-25T19:29:08,579 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e93d82eefb804fba9cb3b84fd18edc22, entries=3, sequenceid=29, filesize=5.2 K
2024-11-25T19:29:08,580 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8c100c1c2d34fb6b992765e0d255a8f as hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c8c100c1c2d34fb6b992765e0d255a8f
2024-11-25T19:29:08,585 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c8c100c1c2d34fb6b992765e0d255a8f, entries=1, sequenceid=29, filesize=5.0 K
2024-11-25T19:29:08,586 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eeccef303ff4b9f82d47fff1dbc0241 as hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4eeccef303ff4b9f82d47fff1dbc0241
2024-11-25T19:29:08,591 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43475/user/jenkins/test-data/5f8d7f39-8f7c-8b56-848f-544d7754ce94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4eeccef303ff4b9f82d47fff1dbc0241, entries=1, sequenceid=29, filesize=4.9 K
2024-11-25T19:29:08,592 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false
2024-11-25T19:29:08,593 INFO [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T19:29:08,593 DEBUG [M:0;6ef6ccb75414:38415 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1732562948451
    Disabling compacts and flushes for region at 1732562948451
    Disabling writes for close at 1732562948451
    Obtaining lock to block concurrent updates at 1732562948451
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732562948451
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732562948451
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732562948452 (+1 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732562948452
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732562948467 (+15 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732562948467
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732562948477 (+10 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732562948492 (+15 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732562948492
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732562948502 (+10 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732562948518 (+16 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732562948518
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732562948529 (+11 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732562948548 (+19 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732562948548
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ce79c38: reopening flushed file at 1732562948565 (+17 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11aa16b2: reopening flushed file at 1732562948572 (+7 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32517b10: reopening flushed file at 1732562948579 (+7 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41726a0b: reopening flushed file at 1732562948585 (+6 ms)
    Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false at 1732562948592 (+7 ms)
    Writing region close event to WAL at 1732562948593 (+1 ms)
    Closed at 1732562948593
2024-11-25T19:29:08,594 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T19:29:08,594 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T19:29:08,594 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T19:29:08,594 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T19:29:08,594 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T19:29:08,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741830_1006 (size=10311)
2024-11-25T19:29:08,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741830_1006 (size=10311)
2024-11-25T19:29:08,596 INFO [M:0;6ef6ccb75414:38415 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-25T19:29:08,596 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-25T19:29:08,596 INFO [M:0;6ef6ccb75414:38415 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38415
2024-11-25T19:29:08,597 INFO [M:0;6ef6ccb75414:38415 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-25T19:29:08,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T19:29:08,698 INFO [M:0;6ef6ccb75414:38415 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-25T19:29:08,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38415-0x100785be59d0000, quorum=127.0.0.1:58626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T19:29:08,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@589971c8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T19:29:08,701 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4548a64b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T19:29:08,701 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T19:29:08,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22748d48{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T19:29:08,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1391b748{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.log.dir/,STOPPED}
2024-11-25T19:29:08,704 WARN [BP-888033461-172.17.0.2-1732562945791 heartbeating to localhost/127.0.0.1:43475 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-25T19:29:08,704 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-25T19:29:08,704 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-25T19:29:08,704 WARN [BP-888033461-172.17.0.2-1732562945791 heartbeating to localhost/127.0.0.1:43475 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-888033461-172.17.0.2-1732562945791 (Datanode Uuid 50d31d56-3c1e-468c-8f6f-5362cf07e486) service to localhost/127.0.0.1:43475
2024-11-25T19:29:08,704 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data3/current/BP-888033461-172.17.0.2-1732562945791 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T19:29:08,705 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data4/current/BP-888033461-172.17.0.2-1732562945791 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T19:29:08,705 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-25T19:29:08,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1853cb8d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T19:29:08,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@758e212c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T19:29:08,723 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T19:29:08,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52a4d688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T19:29:08,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a89713d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.log.dir/,STOPPED}
2024-11-25T19:29:08,725 WARN [BP-888033461-172.17.0.2-1732562945791 heartbeating to localhost/127.0.0.1:43475 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-25T19:29:08,725 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-25T19:29:08,725 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-25T19:29:08,725 WARN [BP-888033461-172.17.0.2-1732562945791 heartbeating to localhost/127.0.0.1:43475 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-888033461-172.17.0.2-1732562945791 (Datanode Uuid 5b44762b-3672-4d8b-b35b-87b16963191e) service to localhost/127.0.0.1:43475
2024-11-25T19:29:08,726 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data1/current/BP-888033461-172.17.0.2-1732562945791 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T19:29:08,726 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/cluster_75df32ee-8b6a-f486-3106-2eb295576dc2/data/data2/current/BP-888033461-172.17.0.2-1732562945791 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T19:29:08,726 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-25T19:29:08,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21ba3242{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-25T19:29:08,735 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d10f4fd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T19:29:08,735 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T19:29:08,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4eef4e65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T19:29:08,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73e64b52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/75c8bd37-3127-96fa-bd59-b2d8519949a7/hadoop.log.dir/,STOPPED}
2024-11-25T19:29:08,746 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-25T19:29:08,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-25T19:29:08,782 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 230)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43475
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:43475
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43475 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43475 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:43475
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43475
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43475 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43475
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=174 (was 137) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5995 (was 5384) - AvailableMemoryMB LEAK? -