2024-11-18 20:20:09,271 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-18 20:20:09,287 main DEBUG Took 0.012947 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-18 20:20:09,288 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-18 20:20:09,288 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-18 20:20:09,289 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-18 20:20:09,290 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,297 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-18 20:20:09,308 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,310 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,311 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,312 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,313 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,313 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,314 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,315 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,316 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,317 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,318 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,319 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,320 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,320 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,321 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,322 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,322 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-18 20:20:09,322 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,323 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-18 20:20:09,324 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-18 20:20:09,326 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-18 20:20:09,328 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-18 20:20:09,328 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
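
For reference, the per-package levels built above can also be expressed programmatically through the Log4j2 Configurator API. This is only an illustrative sketch; the test harness itself takes these levels from the log4j2.properties packaged in the tests jar, as the "Reconfiguration complete" line further down shows.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class TestLogLevelsSketch {
      public static void main(String[] args) {
        // Root at INFO routed to the Console appender; selected packages overridden,
        // mirroring the LoggerConfig$Builder entries in the status log above.
        Configurator.setRootLevel(Level.INFO);
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
      }
    }
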
2024-11-18 20:20:09,329 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-18 20:20:09,330 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-18 20:20:09,340 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-18 20:20:09,343 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-18 20:20:09,345 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-18 20:20:09,345 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-18 20:20:09,346 main DEBUG createAppenders(={Console})
2024-11-18 20:20:09,347 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-18 20:20:09,347 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-18 20:20:09,347 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-18 20:20:09,348 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-18 20:20:09,348 main DEBUG OutputStream closed
2024-11-18 20:20:09,348 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-18 20:20:09,349 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-18 20:20:09,349 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-18 20:20:09,425 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-18 20:20:09,427 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-18 20:20:09,429 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-18 20:20:09,430 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-18 20:20:09,431 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-18 20:20:09,431 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-18 20:20:09,432 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-18 20:20:09,432 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-18 20:20:09,432 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-18 20:20:09,433 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-18 20:20:09,433 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-18 20:20:09,434 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-18 20:20:09,434 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-18 20:20:09,434 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-18 20:20:09,435 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-18 20:20:09,435 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-18 20:20:09,435 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-18 20:20:09,436 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-18 20:20:09,439 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-18 20:20:09,439 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-18 20:20:09,440 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-18 20:20:09,441 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-18T20:20:09,670 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4
2024-11-18 20:20:09,673 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-18 20:20:09,674 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
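
The PatternLayout pattern quoted above is what produces the "timestamp LEVEL [thread {MDC}] Class(line): message" shape of every test log line that follows. A minimal sketch of building the same layout through the public Log4j2 builder API, shown only to decode the pattern, not how the test wires it up:

    import org.apache.logging.log4j.core.layout.PatternLayout;

    public final class LayoutSketch {
      public static void main(String[] args) {
        // %d{ISO8601}: timestamp; %-5p: left-padded level; %t/%X: thread name and MDC;
        // %C{2}(%L): two-segment caller class and line number; %m%n: message.
        PatternLayout layout = PatternLayout.newBuilder()
            .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
            .build();
        System.out.println(layout.getConversionPattern());
      }
    }
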
2024-11-18T20:20:09,682 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-18T20:20:09,725 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=1030, ProcessCount=11, AvailableMemoryMB=5003 2024-11-18T20:20:09,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:20:09,744 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1, deleteOnExit=true 2024-11-18T20:20:09,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:20:09,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/test.cache.data in system properties and HBase conf 2024-11-18T20:20:09,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:20:09,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:20:09,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:20:09,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:20:09,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:20:09,847 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-18T20:20:09,958 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T20:20:09,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:20:09,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:20:09,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:20:09,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:20:09,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:20:09,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:20:09,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:20:09,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:20:09,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:20:09,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:20:09,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:20:09,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:20:09,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:20:09,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:20:10,431 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:20:10,916 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-18T20:20:10,992 INFO [Time-limited test {}] log.Log(170): Logging initialized @2482ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-18T20:20:11,067 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:20:11,143 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:20:11,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:20:11,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:20:11,221 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:20:11,249 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:20:11,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:20:11,266 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:20:11,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/java.io.tmpdir/jetty-localhost-37673-hadoop-hdfs-3_4_1-tests_jar-_-any-8112891685820966012/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:20:11,560 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:37673} 2024-11-18T20:20:11,561 INFO [Time-limited test {}] server.Server(415): Started @3052ms 2024-11-18T20:20:11,603 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:20:12,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:20:12,185 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:20:12,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:20:12,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:20:12,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:20:12,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:20:12,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:20:12,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/java.io.tmpdir/jetty-localhost-42309-hadoop-hdfs-3_4_1-tests_jar-_-any-17821109176449736617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:20:12,345 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:42309} 2024-11-18T20:20:12,345 INFO [Time-limited test {}] server.Server(415): Started @3837ms 2024-11-18T20:20:12,426 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:20:12,662 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:20:12,676 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:20:12,694 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:20:12,694 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:20:12,695 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:20:12,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:20:12,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:20:12,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/java.io.tmpdir/jetty-localhost-44215-hadoop-hdfs-3_4_1-tests_jar-_-any-1799046226917972711/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:20:12,842 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:44215} 2024-11-18T20:20:12,842 INFO [Time-limited test {}] server.Server(415): Started @4333ms 2024-11-18T20:20:12,844 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
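
The DFS, Jetty web UI, and datanode startup above is driven by the StartMiniClusterOption logged earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1). A hedged sketch of how a test would typically request the same topology with the HBase 3.x test utility named in the log; this is an illustration, not the TestLogRolling code itself:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public final class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option printed above: one master, one region server,
        // two HDFS data nodes, one ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);
        try {
          // ... exercise the cluster ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
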
2024-11-18T20:20:13,758 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data4/current/BP-489932828-172.17.0.2-1731961210518/current, will proceed with Du for space computation calculation, 2024-11-18T20:20:13,758 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data1/current/BP-489932828-172.17.0.2-1731961210518/current, will proceed with Du for space computation calculation, 2024-11-18T20:20:13,758 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data2/current/BP-489932828-172.17.0.2-1731961210518/current, will proceed with Du for space computation calculation, 2024-11-18T20:20:13,758 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data3/current/BP-489932828-172.17.0.2-1731961210518/current, will proceed with Du for space computation calculation, 2024-11-18T20:20:13,800 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:20:13,801 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:20:13,851 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcbe579b932d5396 with lease ID 0x1fa3d1485378b321: Processing first storage report for DS-635bcde8-50a6-4cab-8c70-55a5831b63ab from datanode DatanodeRegistration(127.0.0.1:43521, datanodeUuid=dcb5832a-ce8d-4cae-8f01-c13c366be491, infoPort=43077, infoSecurePort=0, ipcPort=38783, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518) 2024-11-18T20:20:13,852 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcbe579b932d5396 with lease ID 0x1fa3d1485378b321: from storage DS-635bcde8-50a6-4cab-8c70-55a5831b63ab node DatanodeRegistration(127.0.0.1:43521, datanodeUuid=dcb5832a-ce8d-4cae-8f01-c13c366be491, infoPort=43077, infoSecurePort=0, ipcPort=38783, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:20:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7dd85c5a4794cf0e with lease ID 0x1fa3d1485378b322: Processing first storage report for DS-29660b43-ddb0-4066-9faa-41155cd64011 from datanode DatanodeRegistration(127.0.0.1:39627, datanodeUuid=1a7fd30e-06b3-4141-9e41-761750e8e4bc, infoPort=40185, infoSecurePort=0, ipcPort=43133, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518) 2024-11-18T20:20:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7dd85c5a4794cf0e with lease ID 0x1fa3d1485378b322: from storage DS-29660b43-ddb0-4066-9faa-41155cd64011 node DatanodeRegistration(127.0.0.1:39627, datanodeUuid=1a7fd30e-06b3-4141-9e41-761750e8e4bc, infoPort=40185, infoSecurePort=0, ipcPort=43133, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:20:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcbe579b932d5396 with lease ID 0x1fa3d1485378b321: Processing first storage report for DS-a7d5db3b-7e21-4cb0-8b61-6d91b29caa8f from datanode DatanodeRegistration(127.0.0.1:43521, datanodeUuid=dcb5832a-ce8d-4cae-8f01-c13c366be491, infoPort=43077, infoSecurePort=0, ipcPort=38783, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518) 2024-11-18T20:20:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcbe579b932d5396 with lease ID 0x1fa3d1485378b321: from storage DS-a7d5db3b-7e21-4cb0-8b61-6d91b29caa8f node DatanodeRegistration(127.0.0.1:43521, datanodeUuid=dcb5832a-ce8d-4cae-8f01-c13c366be491, infoPort=43077, infoSecurePort=0, ipcPort=38783, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:20:13,854 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7dd85c5a4794cf0e with lease ID 0x1fa3d1485378b322: Processing first storage report for DS-d5fc612c-8b96-466e-83f6-f8f0c5597995 from datanode DatanodeRegistration(127.0.0.1:39627, datanodeUuid=1a7fd30e-06b3-4141-9e41-761750e8e4bc, infoPort=40185, infoSecurePort=0, ipcPort=43133, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518) 2024-11-18T20:20:13,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7dd85c5a4794cf0e with 
lease ID 0x1fa3d1485378b322: from storage DS-d5fc612c-8b96-466e-83f6-f8f0c5597995 node DatanodeRegistration(127.0.0.1:39627, datanodeUuid=1a7fd30e-06b3-4141-9e41-761750e8e4bc, infoPort=40185, infoSecurePort=0, ipcPort=43133, storageInfo=lv=-57;cid=testClusterID;nsid=74701062;c=1731961210518), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:20:13,947 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4 2024-11-18T20:20:14,047 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/zookeeper_0, clientPort=55888, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:20:14,058 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55888 2024-11-18T20:20:14,073 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:14,075 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:14,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:20:14,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:20:14,786 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578 with version=8 2024-11-18T20:20:14,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase-staging 2024-11-18T20:20:14,913 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-18T20:20:15,164 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:20:15,173 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:20:15,174 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-11-18T20:20:15,180 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:20:15,180 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:20:15,181 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:20:15,339 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:20:15,396 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-18T20:20:15,405 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-18T20:20:15,409 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:20:15,435 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 62755 (auto-detected) 2024-11-18T20:20:15,436 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-18T20:20:15,455 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37233 2024-11-18T20:20:15,477 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37233 connecting to ZooKeeper ensemble=127.0.0.1:55888 2024-11-18T20:20:15,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:372330x0, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:20:15,573 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37233-0x10150c899f60000 connected 2024-11-18T20:20:15,642 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:15,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:15,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:20:15,664 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578, hbase.cluster.distributed=false 2024-11-18T20:20:15,695 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:20:15,702 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37233 
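
At this point the master has bound its NettyRpcServer to 172.17.0.2:37233 and registered with the ZooKeeper ensemble at 127.0.0.1:55888. A minimal client-side sketch of pointing a standard HBase Connection at that ensemble; in the test this wiring is handled internally by HBaseTestingUtil, so this only makes the quorum and clientPort settings concrete:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ClientSketch {
      public static void main(String[] args) throws Exception {
        // ZooKeeper quorum and client port taken from the mini cluster log above.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 55888);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("Active master: " + admin.getMaster());
        }
      }
    }
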
2024-11-18T20:20:15,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37233 2024-11-18T20:20:15,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37233 2024-11-18T20:20:15,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37233 2024-11-18T20:20:15,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37233 2024-11-18T20:20:15,806 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:20:15,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:20:15,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:20:15,808 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:20:15,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:20:15,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:20:15,811 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:20:15,814 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:20:15,815 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40777 2024-11-18T20:20:15,818 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40777 connecting to ZooKeeper ensemble=127.0.0.1:55888 2024-11-18T20:20:15,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:15,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:15,856 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407770x0, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:20:15,859 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40777-0x10150c899f60001 connected 2024-11-18T20:20:15,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-18T20:20:15,869 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:20:15,877 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:20:15,880 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:20:15,887 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:20:15,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40777 2024-11-18T20:20:15,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40777 2024-11-18T20:20:15,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40777 2024-11-18T20:20:15,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40777 2024-11-18T20:20:15,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40777 2024-11-18T20:20:15,906 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5a964fc427ed:37233 2024-11-18T20:20:15,907 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5a964fc427ed,37233,1731961214997 2024-11-18T20:20:15,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:20:15,922 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:20:15,923 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5a964fc427ed,37233,1731961214997 2024-11-18T20:20:15,954 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:20:15,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:15,954 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:15,955 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:20:15,956 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5a964fc427ed,37233,1731961214997 from backup master directory 2024-11-18T20:20:15,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5a964fc427ed,37233,1731961214997 2024-11-18T20:20:15,965 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:20:15,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:20:15,965 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:20:15,966 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5a964fc427ed,37233,1731961214997 2024-11-18T20:20:15,967 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-18T20:20:15,969 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-18T20:20:16,040 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase.id] with ID: 88f44570-d6cb-4e68-8366-4df58da045ec 2024-11-18T20:20:16,040 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/.tmp/hbase.id 2024-11-18T20:20:16,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:20:16,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:20:16,068 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/.tmp/hbase.id]:[hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase.id] 2024-11-18T20:20:16,117 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:16,122 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-18T20:20:16,142 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-18T20:20:16,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:16,154 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:20:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:20:16,601 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:20:16,603 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:20:16,608 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:20:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:20:16,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:20:16,669 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store 2024-11-18T20:20:16,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:20:16,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:20:16,703 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-18T20:20:16,706 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:20:16,707 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:20:16,707 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:20:16,708 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:20:16,709 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:20:16,709 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:20:16,710 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
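
The 'master:store' descriptor logged above (families info, proc, rs and state) is created internally by MasterRegion. As a sketch, the logged attributes of the 'info' family would read as follows when expressed with the public descriptor builder API; this is an illustration of the attribute mapping, not code taken from the master:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // 'info' family as logged: VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, 8 KB block size. The other families use defaults.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8192)
                .build())
            .build();
        System.out.println(td);
      }
    }
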
2024-11-18T20:20:16,711 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961216707Disabling compacts and flushes for region at 1731961216707Disabling writes for close at 1731961216709 (+2 ms)Writing region close event to WAL at 1731961216709Closed at 1731961216710 (+1 ms) 2024-11-18T20:20:16,714 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/.initializing 2024-11-18T20:20:16,714 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/WALs/5a964fc427ed,37233,1731961214997 2024-11-18T20:20:16,739 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C37233%2C1731961214997, suffix=, logDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/WALs/5a964fc427ed,37233,1731961214997, archiveDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/oldWALs, maxLogs=10 2024-11-18T20:20:16,749 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37233%2C1731961214997.1731961216744 2024-11-18T20:20:16,769 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/WALs/5a964fc427ed,37233,1731961214997/5a964fc427ed%2C37233%2C1731961214997.1731961216744 2024-11-18T20:20:16,779 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40185:40185),(127.0.0.1/127.0.0.1:43077:43077)] 2024-11-18T20:20:16,780 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:20:16,781 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:20:16,784 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,785 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:20:16,850 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:16,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:16,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:20:16,858 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:16,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:20:16,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:20:16,862 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:16,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:20:16,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:20:16,867 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:16,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:20:16,869 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,873 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,875 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,882 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,883 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,888 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:20:16,893 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:20:16,899 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:20:16,901 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688438, jitterRate=-0.12460610270500183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:20:16,909 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961216797Initializing all the Stores at 1731961216799 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961216800 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961216800Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961216801 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961216801Cleaning up temporary data from old regions at 1731961216883 (+82 ms)Region opened successfully at 1731961216909 (+26 ms) 2024-11-18T20:20:16,911 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:20:16,949 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5480bea1, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:20:16,982 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:20:16,992 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:20:16,992 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:20:16,995 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:20:16,996 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-18T20:20:17,001 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-18T20:20:17,001 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:20:17,027 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:20:17,039 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:20:17,048 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:20:17,051 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:20:17,052 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:20:17,062 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:20:17,066 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:20:17,071 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:20:17,115 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:20:17,118 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-18T20:20:17,129 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:20:17,153 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:20:17,162 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:20:17,173 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:20:17,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:20:17,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,173 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,176 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5a964fc427ed,37233,1731961214997, sessionid=0x10150c899f60000, setting cluster-up flag (Was=false) 2024-11-18T20:20:17,198 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,223 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:20:17,227 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,37233,1731961214997 2024-11-18T20:20:17,248 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,273 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:20:17,276 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,37233,1731961214997 2024-11-18T20:20:17,285 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:20:17,296 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(746): ClusterId : 88f44570-d6cb-4e68-8366-4df58da045ec 2024-11-18T20:20:17,299 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:20:17,308 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:20:17,309 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:20:17,316 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:20:17,317 DEBUG [RS:0;5a964fc427ed:40777 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35ee0771, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:20:17,332 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5a964fc427ed:40777 2024-11-18T20:20:17,334 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:20:17,335 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:20:17,335 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T20:20:17,337 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,37233,1731961214997 with port=40777, startcode=1731961215773 2024-11-18T20:20:17,347 DEBUG [RS:0;5a964fc427ed:40777 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:20:17,352 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:20:17,361 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:20:17,367 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
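The "Unable to get data of znode ... because node does not exist" probes above are the master reading the cluster-wide switch znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge, /hbase/snapshot-cleanup); on a fresh cluster none of them exists yet, which is why each read is followed by "already deleted, retry=false". For reference, these are the same znodes the client-side Admin switches write. A minimal sketch follows; the class name and the assumption of a reachable cluster are mine, and snapshotCleanupSwitch only exists on recent HBase releases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterSwitchSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.balancerSwitch(true, false);        // backed by /hbase/balancer
            admin.normalizerSwitch(true);             // backed by /hbase/normalizer
            admin.splitSwitch(true, false);           // backed by /hbase/switch/split
            admin.mergeSwitch(true, false);           // backed by /hbase/switch/merge
            admin.snapshotCleanupSwitch(true, false); // backed by /hbase/snapshot-cleanup
        }
    }
}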
2024-11-18T20:20:17,372 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5a964fc427ed,37233,1731961214997 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:20:17,379 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:20:17,380 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:20:17,380 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:20:17,380 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:20:17,380 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5a964fc427ed:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:20:17,380 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,381 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:20:17,381 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,382 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961247382 2024-11-18T20:20:17,384 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:20:17,386 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:20:17,386 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:20:17,387 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:20:17,391 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:20:17,391 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:20:17,392 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:20:17,392 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:20:17,392 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:17,392 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:20:17,392 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
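The surrounding "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled." entries (and the later HFileCleaner and ReplicationBarrierCleaner ones) come from HBase's ChoreService, which runs periodic background tasks on the master and region servers. ScheduledChore and ChoreService are internal, audience-private classes whose signatures can shift between releases, so the following is only a rough sketch of what such an "enabled" chore amounts to, not something the log implies you should write yourself.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore has a lifecycle owner.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };

        // A chore that fires every 600000 ms, mirroring the LogsCleaner period above;
        // the body here is placeholder work.
        ScheduledChore logsCleanerLike = new ScheduledChore("LogsCleanerLike", stopper, 600_000) {
            @Override protected void chore() {
                System.out.println("cleaning old WALs (placeholder work)");
            }
        };

        ChoreService choreService = new ChoreService("demo");
        choreService.scheduleChore(logsCleanerLike);

        Thread.sleep(1_000);
        choreService.shutdown();
    }
}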
2024-11-18T20:20:17,396 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:20:17,397 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:20:17,398 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:20:17,400 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:20:17,401 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:20:17,404 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961217402,5,FailOnTimeoutGroup] 2024-11-18T20:20:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:20:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:20:17,414 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:20:17,414 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578 2024-11-18T20:20:17,416 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961217404,5,FailOnTimeoutGroup] 2024-11-18T20:20:17,416 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,417 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:20:17,418 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,419 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34941, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:20:17,419 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,426 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,40777,1731961215773 2024-11-18T20:20:17,428 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37233 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,40777,1731961215773 2024-11-18T20:20:17,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:20:17,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:20:17,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:20:17,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:20:17,443 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:20:17,443 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:17,445 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:17,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:20:17,447 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578 2024-11-18T20:20:17,447 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37701 2024-11-18T20:20:17,447 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:20:17,448 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:20:17,449 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:17,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:17,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:20:17,453 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:20:17,453 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:17,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:17,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:20:17,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:20:17,457 DEBUG [RS:0;5a964fc427ed:40777 {}] zookeeper.ZKUtil(111): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,40777,1731961215773 2024-11-18T20:20:17,457 WARN [RS:0;5a964fc427ed:40777 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:20:17,457 INFO [RS:0;5a964fc427ed:40777 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:20:17,458 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773 2024-11-18T20:20:17,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:20:17,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:17,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:17,460 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:20:17,460 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,40777,1731961215773] 2024-11-18T20:20:17,464 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740 2024-11-18T20:20:17,465 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740 2024-11-18T20:20:17,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:20:17,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:20:17,469 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:20:17,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:20:17,476 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:20:17,477 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718153, jitterRate=-0.08682243525981903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:20:17,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961217435Initializing all the Stores at 1731961217438 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961217438Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961217438Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961217438Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961217439 (+1 ms)Cleaning up temporary data from old regions at 1731961217468 (+29 ms)Region opened successfully at 1731961217480 (+12 ms) 2024-11-18T20:20:17,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:20:17,481 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:20:17,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:20:17,481 DEBUG 
[PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:20:17,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:20:17,482 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:20:17,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961217480Disabling compacts and flushes for region at 1731961217480Disabling writes for close at 1731961217481 (+1 ms)Writing region close event to WAL at 1731961217482 (+1 ms)Closed at 1731961217482 2024-11-18T20:20:17,485 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:20:17,485 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:20:17,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:20:17,492 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:20:17,499 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:20:17,501 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:20:17,511 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:20:17,515 INFO [RS:0;5a964fc427ed:40777 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:20:17,515 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,516 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:20:17,522 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:20:17,523 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
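The region server start-up values above (globalMemStoreLimit=880 M, the PressureAwareCompactionThroughputController bounds of 100/50 MB per second) and the earlier StoreHotnessProtector notice are all configuration-driven. Below is a hedged sketch of the keys I believe correspond to them; verify the exact names against the HBase version in use, and note the literal values are simply the defaults this log reflects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Fraction of heap reserved for the global memstore; the
        // "globalMemStoreLimit=880 M" line is this fraction applied to the test JVM's heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);

        // Bounds used by PressureAwareCompactionThroughputController
        // ("higher bound: 100.00 MB/second, lower bound 50.00 MB/second" above).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

        // StoreHotnessProtector is disabled in this run; a value > 0 enables it,
        // as the earlier log message suggests.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
    }
}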
2024-11-18T20:20:17,523 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,524 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,524 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,524 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,524 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,524 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:20:17,524 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,525 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,525 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,525 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,525 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,525 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:20:17,525 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:20:17,525 DEBUG [RS:0;5a964fc427ed:40777 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:20:17,526 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,527 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,527 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,527 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-18T20:20:17,527 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,527 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40777,1731961215773-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:20:17,544 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:20:17,546 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40777,1731961215773-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,547 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,547 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.Replication(171): 5a964fc427ed,40777,1731961215773 started 2024-11-18T20:20:17,565 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:17,566 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,40777,1731961215773, RpcServer on 5a964fc427ed/172.17.0.2:40777, sessionid=0x10150c899f60001 2024-11-18T20:20:17,567 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:20:17,567 DEBUG [RS:0;5a964fc427ed:40777 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,40777,1731961215773 2024-11-18T20:20:17,567 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,40777,1731961215773' 2024-11-18T20:20:17,567 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:20:17,569 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:20:17,569 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:20:17,570 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:20:17,570 DEBUG [RS:0;5a964fc427ed:40777 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5a964fc427ed,40777,1731961215773 2024-11-18T20:20:17,570 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,40777,1731961215773' 2024-11-18T20:20:17,570 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:20:17,571 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:20:17,571 DEBUG [RS:0;5a964fc427ed:40777 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:20:17,571 INFO [RS:0;5a964fc427ed:40777 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:20:17,572 INFO [RS:0;5a964fc427ed:40777 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-18T20:20:17,653 WARN [5a964fc427ed:37233 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:20:17,684 INFO [RS:0;5a964fc427ed:40777 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C40777%2C1731961215773, suffix=, logDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773, archiveDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/oldWALs, maxLogs=32 2024-11-18T20:20:17,687 INFO [RS:0;5a964fc427ed:40777 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961217687 2024-11-18T20:20:17,699 INFO [RS:0;5a964fc427ed:40777 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961217687 2024-11-18T20:20:17,702 DEBUG [RS:0;5a964fc427ed:40777 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40185:40185),(127.0.0.1/127.0.0.1:43077:43077)] 2024-11-18T20:20:17,909 DEBUG [5a964fc427ed:37233 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:20:17,923 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5a964fc427ed,40777,1731961215773 2024-11-18T20:20:17,929 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,40777,1731961215773, state=OPENING 2024-11-18T20:20:17,965 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:20:17,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,974 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:20:17,976 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:20:17,976 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:20:17,979 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:20:17,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,40777,1731961215773}] 2024-11-18T20:20:18,164 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:20:18,168 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44519, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:20:18,180 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:20:18,180 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:20:18,184 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C40777%2C1731961215773.meta, suffix=.meta, logDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773, archiveDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/oldWALs, maxLogs=32 2024-11-18T20:20:18,187 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.meta.1731961218186.meta 2024-11-18T20:20:18,195 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.meta.1731961218186.meta 2024-11-18T20:20:18,197 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43077:43077),(127.0.0.1/127.0.0.1:40185:40185)] 2024-11-18T20:20:18,198 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:20:18,200 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:20:18,203 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:20:18,208 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
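The hbase:meta WAL above is instantiated through the FSHLogProvider. For reference, the provider is chosen via the "hbase.wal.provider" configuration key ("filesystem" maps to the FSHLog-based provider); the snippet below is only an illustrative sketch and is not taken from this test's setup.

    // Illustrative sketch (assumption: not this test's actual configuration code).
    // "hbase.wal.provider" selects the WALProvider that WALFactory instantiates;
    // the value "filesystem" corresponds to the FSHLogProvider seen in the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem"); // FSHLog-based WAL
        return conf;
      }
    }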
2024-11-18T20:20:18,212 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:20:18,213 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:20:18,213 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:20:18,213 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:20:18,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:20:18,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:20:18,218 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:18,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:18,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:20:18,221 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:20:18,221 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:18,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:18,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:20:18,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:20:18,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:18,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:20:18,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:20:18,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:20:18,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:18,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T20:20:18,229 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:20:18,230 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740 2024-11-18T20:20:18,233 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740 2024-11-18T20:20:18,236 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:20:18,236 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:20:18,237 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:20:18,240 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:20:18,242 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800906, jitterRate=0.018405720591545105}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:20:18,243 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:20:18,244 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961218214Writing region info on filesystem at 1731961218214Initializing all the Stores at 1731961218216 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961218216Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961218216Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961218216Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961218216Cleaning up temporary data from old regions at 1731961218236 (+20 ms)Running coprocessor post-open hooks at 1731961218243 (+7 ms)Region opened successfully at 1731961218244 (+1 ms) 2024-11-18T20:20:18,250 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961218155 2024-11-18T20:20:18,263 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:20:18,263 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:20:18,264 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,40777,1731961215773 2024-11-18T20:20:18,267 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,40777,1731961215773, state=OPEN 2024-11-18T20:20:18,291 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:20:18,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:20:18,291 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:20:18,291 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:20:18,292 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5a964fc427ed,40777,1731961215773 2024-11-18T20:20:18,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:20:18,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,40777,1731961215773 in 309 msec 2024-11-18T20:20:18,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:20:18,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 809 msec 2024-11-18T20:20:18,307 DEBUG [PEWorker-4 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:20:18,308 INFO [PEWorker-4 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:20:18,325 DEBUG [PEWorker-4 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:20:18,327 DEBUG [PEWorker-4 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,40777,1731961215773, seqNum=-1] 2024-11-18T20:20:18,346 DEBUG [PEWorker-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:20:18,348 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42879, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:20:18,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0570 sec 2024-11-18T20:20:18,373 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961218373, completionTime=-1 2024-11-18T20:20:18,377 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:20:18,377 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:20:18,406 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:20:18,406 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961278406 2024-11-18T20:20:18,407 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961338406 2024-11-18T20:20:18,407 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 29 msec 2024-11-18T20:20:18,411 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37233,1731961214997-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:18,411 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37233,1731961214997-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:18,412 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37233,1731961214997-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:18,413 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5a964fc427ed:37233, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:20:18,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:18,415 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:20:18,421 DEBUG [master/5a964fc427ed:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:20:18,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.483sec 2024-11-18T20:20:18,451 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:20:18,453 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:20:18,454 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:20:18,454 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:20:18,455 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:20:18,455 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37233,1731961214997-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:20:18,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37233,1731961214997-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:20:18,466 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:20:18,467 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:20:18,468 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37233,1731961214997-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:20:18,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:20:18,511 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-18T20:20:18,511 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-18T20:20:18,515 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5a964fc427ed,37233,-1 for getting cluster id 2024-11-18T20:20:18,517 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:20:18,525 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '88f44570-d6cb-4e68-8366-4df58da045ec' 2024-11-18T20:20:18,528 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:20:18,529 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "88f44570-d6cb-4e68-8366-4df58da045ec" 2024-11-18T20:20:18,529 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b8ae50d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:20:18,529 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5a964fc427ed,37233,-1] 2024-11-18T20:20:18,533 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:20:18,535 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:20:18,537 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45796, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:20:18,540 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:20:18,541 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:20:18,548 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,40777,1731961215773, seqNum=-1] 2024-11-18T20:20:18,549 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:20:18,551 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46394, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:20:18,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=5a964fc427ed,37233,1731961214997 2024-11-18T20:20:18,570 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:20:18,577 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:20:18,582 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:20:18,587 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5a964fc427ed,37233,1731961214997 2024-11-18T20:20:18,589 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5150e84 2024-11-18T20:20:18,590 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:20:18,593 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45808, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:20:18,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:20:18,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
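The two TableDescriptorChecker warnings above are expected in this run: the test deliberately uses a tiny maximum region size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and log rolls happen quickly. As a hedged sketch of how such values could be applied to a cluster configuration (assumed setup, not copied from the test):

    // Illustrative sketch only. The property names and the 786432 / 8192 values
    // come from the warnings above; the surrounding code is hypothetical.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionConfig {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Deliberately tiny limits; production defaults are far larger, which is
        // exactly what TableDescriptorChecker warns about.
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
        return conf;
      }
    }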
2024-11-18T20:20:18,598 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:20:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:20:18,608 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:20:18,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-18T20:20:18,610 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:18,613 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:20:18,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:20:18,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741835_1011 (size=389) 2024-11-18T20:20:18,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741835_1011 (size=389) 2024-11-18T20:20:18,657 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 890bff6af78f9961ad7b1712567d0a71, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578 2024-11-18T20:20:18,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741836_1012 (size=72) 2024-11-18T20:20:18,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741836_1012 (size=72) 2024-11-18T20:20:18,670 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:20:18,670 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 890bff6af78f9961ad7b1712567d0a71, disabling compactions & flushes 2024-11-18T20:20:18,670 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:20:18,670 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:20:18,670 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. after waiting 0 ms 2024-11-18T20:20:18,670 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:20:18,670 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:20:18,670 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 890bff6af78f9961ad7b1712567d0a71: Waiting for close lock at 1731961218670Disabling compacts and flushes for region at 1731961218670Disabling writes for close at 1731961218670Writing region close event to WAL at 1731961218670Closed at 1731961218670 2024-11-18T20:20:18,672 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:20:18,678 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731961218672"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961218672"}]},"ts":"1731961218672"} 2024-11-18T20:20:18,683 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T20:20:18,685 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:20:18,704 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961218686"}]},"ts":"1731961218686"} 2024-11-18T20:20:18,710 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-18T20:20:18,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=890bff6af78f9961ad7b1712567d0a71, ASSIGN}] 2024-11-18T20:20:18,715 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=890bff6af78f9961ad7b1712567d0a71, ASSIGN 2024-11-18T20:20:18,718 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=890bff6af78f9961ad7b1712567d0a71, ASSIGN; state=OFFLINE, location=5a964fc427ed,40777,1731961215773; forceNewPlan=false, retain=false 2024-11-18T20:20:18,870 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=890bff6af78f9961ad7b1712567d0a71, regionState=OPENING, regionLocation=5a964fc427ed,40777,1731961215773 2024-11-18T20:20:18,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=890bff6af78f9961ad7b1712567d0a71, ASSIGN because future has completed 2024-11-18T20:20:18,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 890bff6af78f9961ad7b1712567d0a71, server=5a964fc427ed,40777,1731961215773}] 2024-11-18T20:20:19,043 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 
2024-11-18T20:20:19,044 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 890bff6af78f9961ad7b1712567d0a71, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:20:19,045 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,045 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:20:19,045 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,045 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,048 INFO [StoreOpener-890bff6af78f9961ad7b1712567d0a71-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,051 INFO [StoreOpener-890bff6af78f9961ad7b1712567d0a71-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 890bff6af78f9961ad7b1712567d0a71 columnFamilyName info 2024-11-18T20:20:19,051 DEBUG [StoreOpener-890bff6af78f9961ad7b1712567d0a71-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:20:19,052 INFO [StoreOpener-890bff6af78f9961ad7b1712567d0a71-1 {}] regionserver.HStore(327): Store=890bff6af78f9961ad7b1712567d0a71/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:20:19,053 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,054 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,055 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,056 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,057 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,060 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,065 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:20:19,066 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 890bff6af78f9961ad7b1712567d0a71; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703422, jitterRate=-0.10555306077003479}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:20:19,067 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:19,069 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 890bff6af78f9961ad7b1712567d0a71: Running coprocessor pre-open hook at 1731961219046Writing region info on filesystem at 1731961219046Initializing all the Stores at 1731961219047 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961219048 (+1 ms)Cleaning up temporary data from old regions at 1731961219057 (+9 ms)Running coprocessor post-open hooks at 1731961219067 (+10 ms)Region opened successfully at 1731961219068 (+1 ms) 2024-11-18T20:20:19,071 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71., pid=6, masterSystemTime=1731961219036 2024-11-18T20:20:19,077 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:20:19,077 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:20:19,079 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=890bff6af78f9961ad7b1712567d0a71, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,40777,1731961215773 2024-11-18T20:20:19,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 890bff6af78f9961ad7b1712567d0a71, server=5a964fc427ed,40777,1731961215773 because future has completed 2024-11-18T20:20:19,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:20:19,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 890bff6af78f9961ad7b1712567d0a71, server=5a964fc427ed,40777,1731961215773 in 208 msec 2024-11-18T20:20:19,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:20:19,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=890bff6af78f9961ad7b1712567d0a71, ASSIGN in 383 msec 2024-11-18T20:20:19,103 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:20:19,104 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961219103"}]},"ts":"1731961219103"} 2024-11-18T20:20:19,109 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-18T20:20:19,111 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:20:19,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 511 msec 2024-11-18T20:20:23,727 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-18T20:20:23,779 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:20:23,780 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-18T20:20:25,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:20:25,394 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T20:20:25,396 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:20:25,396 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T20:20:25,397 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:20:25,397 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T20:20:25,398 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T20:20:25,398 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T20:20:28,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:20:28,707 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-18T20:20:28,709 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-18T20:20:28,718 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:20:28,719 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 
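At this point the CREATE operation for TestLogRolling-testSlowSyncLogRolling has completed (pid=4). A client-side equivalent of the logged descriptor, shown only as a hedged sketch of the standard Admin API rather than the test's actual code, would look roughly like:

    // Illustrative sketch (assumed code). Mirrors the descriptor recorded in the
    // create request above: a single 'info' family, one version, ROW bloom filter;
    // all other attributes are left at their defaults.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      static void create(Admin admin) throws IOException {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)                  // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                .build())
            .build());
      }
    }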
2024-11-18T20:20:28,720 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961228720 2024-11-18T20:20:28,731 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:28,731 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:28,731 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:28,731 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:28,732 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:28,732 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961217687 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961228720 2024-11-18T20:20:28,734 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40185:40185),(127.0.0.1/127.0.0.1:43077:43077)] 2024-11-18T20:20:28,734 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961217687 is not closed yet, will try archiving it next time 2024-11-18T20:20:28,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741833_1009 (size=451) 2024-11-18T20:20:28,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741833_1009 (size=451) 2024-11-18T20:20:28,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961217687 to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/oldWALs/5a964fc427ed%2C40777%2C1731961215773.1731961217687 2024-11-18T20:20:28,746 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71., hostname=5a964fc427ed,40777,1731961215773, seqNum=2] 2024-11-18T20:20:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40777 {}] regionserver.HRegion(8855): Flush requested on 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:40,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 890bff6af78f9961ad7b1712567d0a71 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:20:40,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/598ef29d9a6a4984bbd76f953f3d9344 is 1080, key is row0001/info:/1731961228750/Put/seqid=0 2024-11-18T20:20:40,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741838_1014 (size=12509) 2024-11-18T20:20:40,882 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741838_1014 (size=12509) 2024-11-18T20:20:40,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/598ef29d9a6a4984bbd76f953f3d9344 2024-11-18T20:20:40,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/598ef29d9a6a4984bbd76f953f3d9344 as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344 2024-11-18T20:20:40,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344, entries=7, sequenceid=11, filesize=12.2 K 2024-11-18T20:20:40,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 890bff6af78f9961ad7b1712567d0a71 in 160ms, sequenceid=11, compaction requested=false 2024-11-18T20:20:40,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 890bff6af78f9961ad7b1712567d0a71: 2024-11-18T20:20:43,944 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
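
The FsDatasetAsyncDiskServiceFixer entry above reports a NoSuchFieldException for a private threadGroup field, which the message itself attributes (via HBASE-27595) to newer Hadoop releases having removed that field. The following is a minimal, standalone sketch of that kind of reflective probe; it assumes nothing about HBase or Hadoop internals beyond the field name quoted in the log, and the class and method names are illustrative only.

    import java.lang.reflect.Field;

    // Standalone illustration (not HBase's actual fixer): look up a private
    // "threadGroup" field via reflection and tolerate its absence, which is the
    // condition the FsDatasetAsyncDiskServiceFixer log entry above reports.
    public class ThreadGroupFieldProbe {

        /** Returns the value of the private "threadGroup" field, or null if the field no longer exists. */
        static Object readThreadGroup(Object target) {
            try {
                Field f = target.getClass().getDeclaredField("threadGroup");
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException e) {
                // Newer Hadoop versions dropped the field; note it and carry on, as the test fixer does.
                System.out.println("NoSuchFieldException: threadGroup; field absent on this version");
                return null;
            } catch (IllegalAccessException e) {
                throw new IllegalStateException(e);
            }
        }

        public static void main(String[] args) {
            // Probe an object that clearly lacks the field, to exercise the fallback path.
            System.out.println(readThreadGroup(new Object()));
        }
    }
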
2024-11-18T20:20:48,804 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961248804 2024-11-18T20:20:49,013 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:20:49,013 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:49,013 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:49,013 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:49,013 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:49,014 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:20:49,014 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961228720 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961248804 2024-11-18T20:20:49,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741837_1013 (size=12399) 2024-11-18T20:20:49,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741837_1013 (size=12399) 2024-11-18T20:20:49,023 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43077:43077),(127.0.0.1/127.0.0.1:40185:40185)] 2024-11-18T20:20:49,023 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961228720 is not closed yet, will try archiving it next time 2024-11-18T20:20:49,226 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:20:51,438 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:20:53,642 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:20:55,849 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:20:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40777 {}] regionserver.HRegion(8855): Flush requested on 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:20:55,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 890bff6af78f9961ad7b1712567d0a71 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:20:56,059 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:20:56,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/3c65e1d362004e97ab819a2d81f69f02 is 1080, key is row0008/info:/1731961242790/Put/seqid=0 2024-11-18T20:20:56,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741840_1016 (size=12509) 2024-11-18T20:20:56,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741840_1016 (size=12509) 2024-11-18T20:20:56,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/3c65e1d362004e97ab819a2d81f69f02 2024-11-18T20:20:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/3c65e1d362004e97ab819a2d81f69f02 as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/3c65e1d362004e97ab819a2d81f69f02 2024-11-18T20:20:56,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/3c65e1d362004e97ab819a2d81f69f02, entries=7, sequenceid=21, filesize=12.2 K 2024-11-18T20:20:56,335 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:20:56,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 890bff6af78f9961ad7b1712567d0a71 in 
486ms, sequenceid=21, compaction requested=false 2024-11-18T20:20:56,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 890bff6af78f9961ad7b1712567d0a71: 2024-11-18T20:20:56,335 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-18T20:20:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:20:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344 because midkey is the same as first or last row 2024-11-18T20:20:58,056 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:20:59,248 INFO [master/5a964fc427ed:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T20:20:59,248 INFO [master/5a964fc427ed:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T20:21:00,260 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:21:00,262 WARN [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:21:00,263 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C40777%2C1731961215773:(num 1731961248804) roll requested 2024-11-18T20:21:00,264 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961260263 2024-11-18T20:21:00,471 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK]] 2024-11-18T20:21:00,472 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:00,472 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:00,472 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:00,472 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:00,472 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
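
The WARN entry above ("count=8, threshold=5") reflects count-based slow-sync accounting: individual syncs slower than a per-sync threshold are tallied, and once the tally exceeds a configured count the WAL asks to be rolled. The sketch below is a simplified, self-contained version of that accounting, not HBase's actual FSHLog code; the field names are invented, and the only numbers taken as given are the ~200 ms sync costs and threshold=5 visible in the log.

    // Simplified sketch of the slow-sync counting described by the WARN entry above.
    public class SlowSyncRollTracker {

        private final long slowSyncMs;    // a sync slower than this counts as "slow" (the log shows ~200 ms syncs)
        private final int rollThreshold;  // request a roll once more than this many slow syncs accumulate (log shows threshold=5)
        private int slowSyncCount;

        SlowSyncRollTracker(long slowSyncMs, int rollThreshold) {
            this.slowSyncMs = slowSyncMs;
            this.rollThreshold = rollThreshold;
        }

        /** Records one sync cost; returns true when a log roll should be requested. */
        boolean recordSync(long syncCostMs) {
            if (syncCostMs >= slowSyncMs) {
                slowSyncCount++;
            }
            if (slowSyncCount > rollThreshold) {
                System.out.printf(
                    "Requesting log roll because we exceeded slow sync threshold; count=%d, threshold=%d%n",
                    slowSyncCount, rollThreshold);
                return true;
            }
            return false;
        }

        /** A real WAL would presumably clear the tally once the roll has actually happened. */
        void onRolled() {
            slowSyncCount = 0;
        }

        public static void main(String[] args) {
            SlowSyncRollTracker tracker = new SlowSyncRollTracker(100, 5);
            // Eight ~200 ms syncs, mirroring the count=8, threshold=5 entry above.
            for (int i = 0; i < 8; i++) {
                tracker.recordSync(200 + i);
            }
            tracker.onRolled();
        }
    }
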
2024-11-18T20:21:00,472 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961248804 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961260263 2024-11-18T20:21:00,473 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40185:40185),(127.0.0.1/127.0.0.1:43077:43077)] 2024-11-18T20:21:00,473 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961248804 is not closed yet, will try archiving it next time 2024-11-18T20:21:00,473 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961228720 to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/oldWALs/5a964fc427ed%2C40777%2C1731961215773.1731961228720 2024-11-18T20:21:00,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741839_1015 (size=7739) 2024-11-18T20:21:00,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741839_1015 (size=7739) 2024-11-18T20:21:02,466 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:04,045 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 890bff6af78f9961ad7b1712567d0a71, had cached 0 bytes from a total of 25018 2024-11-18T20:21:04,672 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:06,877 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:09,082 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:11,085 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T20:21:11,085 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961271085 2024-11-18T20:21:13,944 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:21:16,151 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5056 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:16,153 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5056 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:16,153 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C40777%2C1731961215773:(num 1731961271085) roll requested 2024-11-18T20:21:16,153 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:16,153 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:16,153 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:16,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:16,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:16,154 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961260263 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961271085 2024-11-18T20:21:16,155 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40185:40185),(127.0.0.1/127.0.0.1:43077:43077)] 2024-11-18T20:21:16,155 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961260263 is not closed yet, will try archiving it next time 2024-11-18T20:21:16,156 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961276156 2024-11-18T20:21:16,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741841_1017 (size=4753) 2024-11-18T20:21:16,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741841_1017 (size=4753) 2024-11-18T20:21:21,174 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:21,175 WARN [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:21,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40777 {}] regionserver.HRegion(8855): Flush requested on 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:21:21,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 890bff6af78f9961ad7b1712567d0a71 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:21:21,182 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:21,182 WARN [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:23,176 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T20:21:26,177 INFO [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:26,177 WARN [FSHLog-0-hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578-prefix:5a964fc427ed,40777,1731961215773 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39627,DS-29660b43-ddb0-4066-9faa-41155cd64011,DISK], DatanodeInfoWithStorage[127.0.0.1:43521,DS-635bcde8-50a6-4cab-8c70-55a5831b63ab,DISK]] 2024-11-18T20:21:26,178 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,178 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,178 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,178 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,178 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,179 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961271085 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961276156 2024-11-18T20:21:26,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741842_1018 (size=1569) 2024-11-18T20:21:26,182 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43077:43077),(127.0.0.1/127.0.0.1:40185:40185)] 2024-11-18T20:21:26,182 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961271085 is not closed yet, will try archiving it next time 2024-11-18T20:21:26,182 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C40777%2C1731961215773:(num 1731961276156) roll requested 2024-11-18T20:21:26,183 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961286182 2024-11-18T20:21:26,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741842_1018 (size=1569) 2024-11-18T20:21:26,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/bb5f6d1821e84296a577c671a295465a is 1080, key is row0015/info:/1731961257853/Put/seqid=0 2024-11-18T20:21:26,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741844_1020 (size=12509) 2024-11-18T20:21:26,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741844_1020 (size=12509) 2024-11-18T20:21:26,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/bb5f6d1821e84296a577c671a295465a 2024-11-18T20:21:26,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,234 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,234 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,234 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,234 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961276156 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961286182 2024-11-18T20:21:26,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741843_1019 (size=93) 2024-11-18T20:21:26,237 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741843_1019 (size=93) 2024-11-18T20:21:26,238 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961276156 to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/oldWALs/5a964fc427ed%2C40777%2C1731961215773.1731961276156 2024-11-18T20:21:26,245 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43077:43077),(127.0.0.1/127.0.0.1:40185:40185)] 2024-11-18T20:21:26,247 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40777%2C1731961215773.1731961286246 2024-11-18T20:21:26,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/bb5f6d1821e84296a577c671a295465a as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/bb5f6d1821e84296a577c671a295465a 2024-11-18T20:21:26,260 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,261 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,261 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,261 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:26,261 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961286182 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/WALs/5a964fc427ed,40777,1731961215773/5a964fc427ed%2C40777%2C1731961215773.1731961286246 2024-11-18T20:21:26,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741845_1021 (size=1258) 2024-11-18T20:21:26,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741845_1021 (size=1258) 2024-11-18T20:21:26,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/bb5f6d1821e84296a577c671a295465a, entries=7, sequenceid=31, filesize=12.2 K 2024-11-18T20:21:26,275 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43077:43077),(127.0.0.1/127.0.0.1:40185:40185)] 2024-11-18T20:21:26,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 890bff6af78f9961ad7b1712567d0a71 in 5101ms, sequenceid=31, compaction requested=true 2024-11-18T20:21:26,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
890bff6af78f9961ad7b1712567d0a71: 2024-11-18T20:21:26,277 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-18T20:21:26,277 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:21:26,277 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344 because midkey is the same as first or last row 2024-11-18T20:21:26,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 890bff6af78f9961ad7b1712567d0a71:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:21:26,285 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:21:26,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:21:26,289 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:21:26,291 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.HStore(1541): 890bff6af78f9961ad7b1712567d0a71/info is initiating minor compaction (all files) 2024-11-18T20:21:26,291 INFO [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 890bff6af78f9961ad7b1712567d0a71/info in TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 
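
The split-policy entries above sum the region's store file sizes (three ~12.2 K flush outputs, 37527 bytes in total per the compaction selection line) and compare the sum against sizeToCheck=16.0 K, then still decline to split because the midkey equals the first or last row. The plain-Java sketch below covers only the size comparison, using the figures visible in the log; it is an illustration of the idea, not the actual ConstantSizeRegionSplitPolicy implementation.

    import java.util.List;

    // Illustration of the size check behind "Should split because region size is
    // big enough sumSize=..., sizeToCheck=..." in the entries above. The actual
    // policy additionally refuses to split when the midkey matches the first or
    // last row, which is the "cannot split ... because midkey" message.
    public class RegionSizeSplitCheck {

        /** True when the summed store file size reaches the split threshold. */
        static boolean shouldSplit(List<Long> storeFileSizesBytes, long sizeToCheckBytes) {
            long sumSize = storeFileSizesBytes.stream().mapToLong(Long::longValue).sum();
            boolean bigEnough = sumSize >= sizeToCheckBytes;
            System.out.printf("sumSize=%d bytes, sizeToCheck=%d bytes, shouldSplit=%b%n",
                sumSize, sizeToCheckBytes, bigEnough);
            return bigEnough;
        }

        public static void main(String[] args) {
            // Three 12509-byte flush files (37527 bytes, ~36.6 K) against a 16.0 K check, as in the log.
            shouldSplit(List.of(12509L, 12509L, 12509L), 16 * 1024L);
        }
    }
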
2024-11-18T20:21:26,292 INFO [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344, hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/3c65e1d362004e97ab819a2d81f69f02, hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/bb5f6d1821e84296a577c671a295465a] into tmpdir=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp, totalSize=36.6 K 2024-11-18T20:21:26,294 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] compactions.Compactor(225): Compacting 598ef29d9a6a4984bbd76f953f3d9344, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731961228750 2024-11-18T20:21:26,296 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c65e1d362004e97ab819a2d81f69f02, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731961242790 2024-11-18T20:21:26,297 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb5f6d1821e84296a577c671a295465a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731961257853 2024-11-18T20:21:26,348 INFO [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 890bff6af78f9961ad7b1712567d0a71#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:21:26,351 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/992bf3f719ef47e8ac4ebfc579700ded is 1080, key is row0001/info:/1731961228750/Put/seqid=0 2024-11-18T20:21:26,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741847_1023 (size=27710) 2024-11-18T20:21:26,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741847_1023 (size=27710) 2024-11-18T20:21:26,392 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/992bf3f719ef47e8ac4ebfc579700ded as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/992bf3f719ef47e8ac4ebfc579700ded 2024-11-18T20:21:26,425 INFO [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 890bff6af78f9961ad7b1712567d0a71/info of 890bff6af78f9961ad7b1712567d0a71 into 992bf3f719ef47e8ac4ebfc579700ded(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:21:26,426 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 890bff6af78f9961ad7b1712567d0a71: 2024-11-18T20:21:26,428 INFO [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71., storeName=890bff6af78f9961ad7b1712567d0a71/info, priority=13, startTime=1731961286279; duration=0sec 2024-11-18T20:21:26,429 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T20:21:26,429 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:21:26,429 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/992bf3f719ef47e8ac4ebfc579700ded because midkey is the same as first or last row 2024-11-18T20:21:26,429 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T20:21:26,429 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:21:26,429 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/992bf3f719ef47e8ac4ebfc579700ded because midkey is the same as first or last row 2024-11-18T20:21:26,430 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T20:21:26,430 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:21:26,430 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/992bf3f719ef47e8ac4ebfc579700ded because midkey is the same as first or last row 2024-11-18T20:21:26,430 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:21:26,430 DEBUG [RS:0;5a964fc427ed:40777-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 890bff6af78f9961ad7b1712567d0a71:info 2024-11-18T20:21:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40777 {}] regionserver.HRegion(8855): Flush requested on 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:21:38,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 890bff6af78f9961ad7b1712567d0a71 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:21:38,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/a46501da2ac64888ad5d141a2931078b is 1080, key is row0022/info:/1731961286248/Put/seqid=0 2024-11-18T20:21:38,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741848_1024 (size=12509) 2024-11-18T20:21:38,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741848_1024 (size=12509) 2024-11-18T20:21:38,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/a46501da2ac64888ad5d141a2931078b 2024-11-18T20:21:38,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/a46501da2ac64888ad5d141a2931078b as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/a46501da2ac64888ad5d141a2931078b 2024-11-18T20:21:38,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/a46501da2ac64888ad5d141a2931078b, entries=7, sequenceid=42, filesize=12.2 K 2024-11-18T20:21:38,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 890bff6af78f9961ad7b1712567d0a71 in 190ms, sequenceid=42, compaction requested=false 2024-11-18T20:21:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 890bff6af78f9961ad7b1712567d0a71: 2024-11-18T20:21:38,469 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-18T20:21:38,469 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:21:38,469 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/992bf3f719ef47e8ac4ebfc579700ded because midkey is the same as first or last row 2024-11-18T20:21:43,944 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:21:46,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:21:46,321 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:21:46,321 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:21:46,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:46,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:46,328 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T20:21:46,329 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:21:46,329 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1319998913, stopped=false 2024-11-18T20:21:46,329 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5a964fc427ed,37233,1731961214997 2024-11-18T20:21:46,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:46,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:46,360 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:21:46,360 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:46,360 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:46,360 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:21:46,361 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:21:46,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:46,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:46,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:46,361 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,40777,1731961215773' ***** 2024-11-18T20:21:46,361 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:21:46,362 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:21:46,362 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:21:46,362 INFO [RS:0;5a964fc427ed:40777 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:21:46,362 INFO [RS:0;5a964fc427ed:40777 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:21:46,362 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(3091): Received CLOSE for 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:21:46,363 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,40777,1731961215773 2024-11-18T20:21:46,363 INFO [RS:0;5a964fc427ed:40777 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:21:46,363 INFO [RS:0;5a964fc427ed:40777 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5a964fc427ed:40777. 
2024-11-18T20:21:46,363 DEBUG [RS:0;5a964fc427ed:40777 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:21:46,363 DEBUG [RS:0;5a964fc427ed:40777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:46,363 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 890bff6af78f9961ad7b1712567d0a71, disabling compactions & flushes 2024-11-18T20:21:46,363 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:21:46,363 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:21:46,364 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. after waiting 0 ms 2024-11-18T20:21:46,364 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:21:46,364 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:21:46,364 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:21:46,364 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T20:21:46,364 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:21:46,364 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 890bff6af78f9961ad7b1712567d0a71 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-18T20:21:46,366 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:21:46,366 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:21:46,366 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:21:46,366 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 890bff6af78f9961ad7b1712567d0a71=TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.} 2024-11-18T20:21:46,366 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:21:46,366 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:21:46,366 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:21:46,366 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-18T20:21:46,366 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 890bff6af78f9961ad7b1712567d0a71 2024-11-18T20:21:46,372 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/fd3618f6eaae4090aa6c345dfb2bf447 is 1080, key is row0029/info:/1731961300289/Put/seqid=0 2024-11-18T20:21:46,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741849_1025 (size=8193) 2024-11-18T20:21:46,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741849_1025 (size=8193) 2024-11-18T20:21:46,387 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/fd3618f6eaae4090aa6c345dfb2bf447 2024-11-18T20:21:46,398 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/info/81f0c66e54594a65a091ef176f5c88ec is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71./info:regioninfo/1731961219079/Put/seqid=0 2024-11-18T20:21:46,399 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/.tmp/info/fd3618f6eaae4090aa6c345dfb2bf447 as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/fd3618f6eaae4090aa6c345dfb2bf447 2024-11-18T20:21:46,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741850_1026 (size=7016) 2024-11-18T20:21:46,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741850_1026 (size=7016) 2024-11-18T20:21:46,411 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/fd3618f6eaae4090aa6c345dfb2bf447, entries=3, sequenceid=48, filesize=8.0 K 2024-11-18T20:21:46,413 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 890bff6af78f9961ad7b1712567d0a71 in 48ms, sequenceid=48, compaction requested=true 2024-11-18T20:21:46,414 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344, hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/3c65e1d362004e97ab819a2d81f69f02, hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/bb5f6d1821e84296a577c671a295465a] to archive 2024-11-18T20:21:46,415 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/info/81f0c66e54594a65a091ef176f5c88ec 2024-11-18T20:21:46,419 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
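The HRegionFileSystem(442) "Committing ... .tmp/info/... as .../info/..." records show the flush first writing the new HFile under the region's .tmp directory and then publishing it with a rename into the column-family directory. A minimal sketch of that write-then-rename commit against the Hadoop FileSystem API follows; the path layout and method name are illustrative, not the real HRegionFileSystem logic:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: publish a flushed file from the region's .tmp directory into the store directory.
public class TmpCommitSketch {
  public static Path commitStoreFile(Configuration conf, Path regionDir,
                                     String family, String fileName) throws IOException {
    FileSystem fs = regionDir.getFileSystem(conf);
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
    Path storeFile = new Path(new Path(regionDir, family), fileName);
    fs.mkdirs(storeFile.getParent());
    // A rename within one HDFS filesystem is a metadata-only move, so readers never see a partial file.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
    return storeFile;
  }
}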
2024-11-18T20:21:46,423 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344 to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/archive/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/598ef29d9a6a4984bbd76f953f3d9344 2024-11-18T20:21:46,426 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/3c65e1d362004e97ab819a2d81f69f02 to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/archive/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/3c65e1d362004e97ab819a2d81f69f02 2024-11-18T20:21:46,428 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/bb5f6d1821e84296a577c671a295465a to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/archive/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/info/bb5f6d1821e84296a577c671a295465a 2024-11-18T20:21:46,444 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/ns/ee5ed6881e494acf8f7c8c463944e5cf is 43, key is default/ns:d/1731961218354/Put/seqid=0 2024-11-18T20:21:46,441 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5a964fc427ed:37233 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T20:21:46,452 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [598ef29d9a6a4984bbd76f953f3d9344=12509, 3c65e1d362004e97ab819a2d81f69f02=12509, bb5f6d1821e84296a577c671a295465a=12509] 2024-11-18T20:21:46,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741851_1027 (size=5153) 2024-11-18T20:21:46,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741851_1027 (size=5153) 2024-11-18T20:21:46,469 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/ns/ee5ed6881e494acf8f7c8c463944e5cf 2024-11-18T20:21:46,481 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/default/TestLogRolling-testSlowSyncLogRolling/890bff6af78f9961ad7b1712567d0a71/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-18T20:21:46,485 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 2024-11-18T20:21:46,485 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 890bff6af78f9961ad7b1712567d0a71: Waiting for close lock at 1731961306363Running coprocessor pre-close hooks at 1731961306363Disabling compacts and flushes for region at 1731961306363Disabling writes for close at 1731961306364 (+1 ms)Obtaining lock to block concurrent updates at 1731961306364Preparing flush snapshotting stores in 890bff6af78f9961ad7b1712567d0a71 at 1731961306364Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731961306364Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. at 1731961306366 (+2 ms)Flushing 890bff6af78f9961ad7b1712567d0a71/info: creating writer at 1731961306366Flushing 890bff6af78f9961ad7b1712567d0a71/info: appending metadata at 1731961306371 (+5 ms)Flushing 890bff6af78f9961ad7b1712567d0a71/info: closing flushed file at 1731961306371Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@588fc44a: reopening flushed file at 1731961306398 (+27 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 890bff6af78f9961ad7b1712567d0a71 in 48ms, sequenceid=48, compaction requested=true at 1731961306413 (+15 ms)Writing region close event to WAL at 1731961306459 (+46 ms)Running coprocessor post-close hooks at 1731961306483 (+24 ms)Closed at 1731961306485 (+2 ms) 2024-11-18T20:21:46,486 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731961218594.890bff6af78f9961ad7b1712567d0a71. 
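The HFileArchiver(596) records show each compacted store file being moved under archive/ at the same relative path as it had under data/, and the StoppedRpcClientException stack shows the archival report to the master failing because the RPC client has already been stopped during shutdown; the store logs a WARN ("Failed to report archival of files") and continues rather than failing the close. A rough sketch of that move-and-best-effort-report shape over the Hadoop FileSystem API (the ArchivalReporter interface and path arithmetic here are assumptions, not HBase code):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: archive compacted files by mirroring their layout under an archive root,
// and treat a failed report to the master as retryable rather than fatal to the region close.
public class ArchiveSketch {
  interface ArchivalReporter { void report(List<Path> archived) throws IOException; }

  public static void archive(FileSystem fs, Path dataRoot, Path archiveRoot,
                             List<Path> compactedFiles, ArchivalReporter reporter) throws IOException {
    List<Path> archived = new ArrayList<>();
    for (Path src : compactedFiles) {
      // Keep the same table/region/family/file layout under the archive root.
      String relative = src.toUri().getPath().substring(dataRoot.toUri().getPath().length());
      Path dst = new Path(archiveRoot, relative.replaceFirst("^/", ""));
      fs.mkdirs(dst.getParent());
      if (fs.rename(src, dst)) {
        archived.add(dst);
      }
    }
    try {
      reporter.report(archived);
    } catch (IOException e) {
      // During shutdown the RPC client may already be stopped; log and let a later retry handle it.
      System.err.println("Failed to report file archival(s), will be retried: " + e);
    }
  }
}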
2024-11-18T20:21:46,516 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/table/f58ca2ab0c654257b9d072a632ab4a2b is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731961219103/Put/seqid=0 2024-11-18T20:21:46,527 INFO [regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:21:46,527 INFO [regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:21:46,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741852_1028 (size=5396) 2024-11-18T20:21:46,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741852_1028 (size=5396) 2024-11-18T20:21:46,551 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/table/f58ca2ab0c654257b9d072a632ab4a2b 2024-11-18T20:21:46,564 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/info/81f0c66e54594a65a091ef176f5c88ec as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/info/81f0c66e54594a65a091ef176f5c88ec 2024-11-18T20:21:46,567 DEBUG [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:21:46,576 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/info/81f0c66e54594a65a091ef176f5c88ec, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T20:21:46,579 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/ns/ee5ed6881e494acf8f7c8c463944e5cf as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/ns/ee5ed6881e494acf8f7c8c463944e5cf 2024-11-18T20:21:46,589 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/ns/ee5ed6881e494acf8f7c8c463944e5cf, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:21:46,590 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/.tmp/table/f58ca2ab0c654257b9d072a632ab4a2b as 
hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/table/f58ca2ab0c654257b9d072a632ab4a2b 2024-11-18T20:21:46,603 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/table/f58ca2ab0c654257b9d072a632ab4a2b, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T20:21:46,605 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 239ms, sequenceid=11, compaction requested=false 2024-11-18T20:21:46,612 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:21:46,613 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:21:46,614 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:21:46,614 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961306365Running coprocessor pre-close hooks at 1731961306365Disabling compacts and flushes for region at 1731961306365Disabling writes for close at 1731961306366 (+1 ms)Obtaining lock to block concurrent updates at 1731961306366Preparing flush snapshotting stores in 1588230740 at 1731961306366Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731961306367 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731961306368 (+1 ms)Flushing 1588230740/info: creating writer at 1731961306368Flushing 1588230740/info: appending metadata at 1731961306397 (+29 ms)Flushing 1588230740/info: closing flushed file at 1731961306397Flushing 1588230740/ns: creating writer at 1731961306425 (+28 ms)Flushing 1588230740/ns: appending metadata at 1731961306443 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731961306443Flushing 1588230740/table: creating writer at 1731961306482 (+39 ms)Flushing 1588230740/table: appending metadata at 1731961306515 (+33 ms)Flushing 1588230740/table: closing flushed file at 1731961306515Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f90a6cc: reopening flushed file at 1731961306563 (+48 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3489c709: reopening flushed file at 1731961306577 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@273c4abd: reopening flushed file at 1731961306589 (+12 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 239ms, sequenceid=11, compaction requested=false at 1731961306605 (+16 ms)Writing region close event to WAL at 1731961306607 (+2 ms)Running coprocessor post-close hooks at 1731961306613 (+6 ms)Closed at 1731961306613 2024-11-18T20:21:46,614 DEBUG 
[RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:21:46,767 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,40777,1731961215773; all regions closed. 2024-11-18T20:21:46,769 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,769 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,769 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,769 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,770 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741834_1010 (size=3066) 2024-11-18T20:21:46,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741834_1010 (size=3066) 2024-11-18T20:21:46,777 DEBUG [RS:0;5a964fc427ed:40777 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/oldWALs 2024-11-18T20:21:46,778 INFO [RS:0;5a964fc427ed:40777 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C40777%2C1731961215773.meta:.meta(num 1731961218186) 2024-11-18T20:21:46,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,778 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,778 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,779 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,779 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:46,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741846_1022 (size=13040) 2024-11-18T20:21:46,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741846_1022 (size=13040) 2024-11-18T20:21:46,791 DEBUG [RS:0;5a964fc427ed:40777 {}] wal.AbstractFSWAL(1256): Moved 5 WAL file(s) to /user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/oldWALs 2024-11-18T20:21:46,792 INFO [RS:0;5a964fc427ed:40777 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C40777%2C1731961215773:(num 1731961286246) 2024-11-18T20:21:46,792 DEBUG [RS:0;5a964fc427ed:40777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:46,792 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:21:46,792 INFO [RS:0;5a964fc427ed:40777 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:21:46,792 INFO [RS:0;5a964fc427ed:40777 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:21:46,792 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
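The repeated wal.FSHLog$SyncRunner "interrupted" records are the WAL's sync worker threads being woken out of a blocking queue take() as the WAL is closed and its files are moved to oldWALs. A tiny sketch of that consumer shape in plain Java (not the real FSHLog code; the queue element type and log text are illustrative):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch only: a sync worker that blocks waiting for sync requests and exits quietly when it
// is interrupted during WAL close, printing "interrupted" like the records above.
public class SyncRunnerSketch implements Runnable {
  private final BlockingQueue<Runnable> syncRequests = new LinkedBlockingQueue<>();

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Runnable sync = syncRequests.take();   // blocks until a sync request arrives
        sync.run();                            // e.g. hflush/hsync of the WAL output stream
      } catch (InterruptedException e) {
        System.out.println("interrupted");     // the WAL is closing; stop the loop
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  public void submit(Runnable syncRequest) {
    syncRequests.add(syncRequest);
  }
}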
2024-11-18T20:21:46,793 INFO [RS:0;5a964fc427ed:40777 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:21:46,793 INFO [RS:0;5a964fc427ed:40777 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40777 2024-11-18T20:21:46,807 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,40777,1731961215773 2024-11-18T20:21:46,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:21:46,807 INFO [RS:0;5a964fc427ed:40777 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:21:46,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,40777,1731961215773] 2024-11-18T20:21:46,827 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,40777,1731961215773 already deleted, retry=false 2024-11-18T20:21:46,827 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,40777,1731961215773 expired; onlineServers=0 2024-11-18T20:21:46,827 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5a964fc427ed,37233,1731961214997' ***** 2024-11-18T20:21:46,827 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:21:46,827 INFO [M:0;5a964fc427ed:37233 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:21:46,827 INFO [M:0;5a964fc427ed:37233 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:21:46,828 DEBUG [M:0;5a964fc427ed:37233 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:21:46,828 DEBUG [M:0;5a964fc427ed:37233 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:21:46,828 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
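The ZKWatcher NodeDeleted event for /hbase/rs/5a964fc427ed,40777,... and the RegionServerTracker "ephemeral node deleted, processing expiration" record reflect the usual ZooKeeper liveness pattern: each regionserver registers an ephemeral znode, and the master reacts when it disappears. A minimal sketch with the plain ZooKeeper client (connection string, paths and the reaction are illustrative, and this is not HBase's ZKWatcher implementation):

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Sketch only: register an ephemeral liveness znode and watch for its deletion.
public class EphemeralLivenessSketch {
  public static void main(String[] args) throws Exception {
    String path = "/hbase/rs/example-server";
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    // The znode vanishes automatically when this session ends (clean stop, crash, ...).
    zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Elsewhere (e.g. on the master side), watch the node and treat NodeDeleted as expiration.
    zk.exists(path, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("ephemeral node deleted, processing expiration: " + event.getPath());
      }
    });

    Thread.sleep(Long.MAX_VALUE); // keep the session alive for the demo
  }
}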
2024-11-18T20:21:46,828 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961217402 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961217402,5,FailOnTimeoutGroup] 2024-11-18T20:21:46,828 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961217404 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961217404,5,FailOnTimeoutGroup] 2024-11-18T20:21:46,828 INFO [M:0;5a964fc427ed:37233 {}] hbase.ChoreService(370): Chore service for: master/5a964fc427ed:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:21:46,828 INFO [M:0;5a964fc427ed:37233 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:21:46,828 DEBUG [M:0;5a964fc427ed:37233 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:21:46,828 INFO [M:0;5a964fc427ed:37233 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:21:46,829 INFO [M:0;5a964fc427ed:37233 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:21:46,829 INFO [M:0;5a964fc427ed:37233 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:21:46,829 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:21:46,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:21:46,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:46,839 DEBUG [M:0;5a964fc427ed:37233 {}] zookeeper.ZKUtil(347): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:21:46,839 WARN [M:0;5a964fc427ed:37233 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:21:46,848 INFO [M:0;5a964fc427ed:37233 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/.lastflushedseqids 2024-11-18T20:21:46,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741853_1029 (size=130) 2024-11-18T20:21:46,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741853_1029 (size=130) 2024-11-18T20:21:46,872 INFO [M:0;5a964fc427ed:37233 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:21:46,872 INFO [M:0;5a964fc427ed:37233 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:21:46,872 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:21:46,873 INFO [M:0;5a964fc427ed:37233 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:46,873 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:46,873 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:21:46,873 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:46,873 INFO [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-18T20:21:46,895 DEBUG [M:0;5a964fc427ed:37233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2976d4c2f52f44e5b53f9ad92a5064bb is 82, key is hbase:meta,,1/info:regioninfo/1731961218264/Put/seqid=0 2024-11-18T20:21:46,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741854_1030 (size=5672) 2024-11-18T20:21:46,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741854_1030 (size=5672) 2024-11-18T20:21:46,919 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:21:46,919 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40777-0x10150c899f60001, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:21:46,920 INFO [RS:0;5a964fc427ed:40777 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:21:46,920 INFO [RS:0;5a964fc427ed:40777 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,40777,1731961215773; zookeeper connection closed. 
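The flush records here and above note "(bloomFilter=true)", and the master store load below mentions a CompoundBloomFilter: each flushed HFile carries a bloom filter so point reads can skip files that cannot contain the requested key. As an aside, a generic illustration of that idea with Guava's BloomFilter (assuming plain Guava on the classpath; this is not HBase's CompoundBloomFilter implementation):

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import java.nio.charset.StandardCharsets;

// Sketch only: a per-file bloom filter over row keys; a negative answer means the file
// definitely does not contain the row, so the read path can skip opening it.
public class BloomSketch {
  public static void main(String[] args) {
    BloomFilter<byte[]> rowBloom =
        BloomFilter.create(Funnels.byteArrayFunnel(), 10_000, 0.01); // ~1% false positives

    rowBloom.put("row0029".getBytes(StandardCharsets.UTF_8));        // keys written into the file

    boolean maybePresent = rowBloom.mightContain("row0029".getBytes(StandardCharsets.UTF_8)); // true
    boolean skipFile = !rowBloom.mightContain("row9999".getBytes(StandardCharsets.UTF_8));    // very likely true
    System.out.println("maybePresent=" + maybePresent + " skipFile=" + skipFile);
  }
}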
2024-11-18T20:21:46,920 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3f3d828f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3f3d828f 2024-11-18T20:21:46,921 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:21:47,311 INFO [M:0;5a964fc427ed:37233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2976d4c2f52f44e5b53f9ad92a5064bb 2024-11-18T20:21:47,343 DEBUG [M:0;5a964fc427ed:37233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/49c1c7d7706549769cc8344f509ab283 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961219114/Put/seqid=0 2024-11-18T20:21:47,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741855_1031 (size=6247) 2024-11-18T20:21:47,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741855_1031 (size=6247) 2024-11-18T20:21:47,536 INFO [regionserver/5a964fc427ed:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:21:47,756 INFO [M:0;5a964fc427ed:37233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/49c1c7d7706549769cc8344f509ab283 2024-11-18T20:21:47,764 INFO [M:0;5a964fc427ed:37233 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 49c1c7d7706549769cc8344f509ab283 2024-11-18T20:21:47,779 DEBUG [M:0;5a964fc427ed:37233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ded6e7720f541e784d04a983e5df226 is 69, key is 5a964fc427ed,40777,1731961215773/rs:state/1731961217430/Put/seqid=0 2024-11-18T20:21:47,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741856_1032 (size=5156) 2024-11-18T20:21:47,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741856_1032 (size=5156) 2024-11-18T20:21:47,787 INFO [M:0;5a964fc427ed:37233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ded6e7720f541e784d04a983e5df226 2024-11-18T20:21:47,810 DEBUG [M:0;5a964fc427ed:37233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0421487e5ba746829619e840248ac102 is 52, key is 
load_balancer_on/state:d/1731961218574/Put/seqid=0 2024-11-18T20:21:47,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741857_1033 (size=5056) 2024-11-18T20:21:47,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741857_1033 (size=5056) 2024-11-18T20:21:47,820 INFO [M:0;5a964fc427ed:37233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0421487e5ba746829619e840248ac102 2024-11-18T20:21:47,834 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2976d4c2f52f44e5b53f9ad92a5064bb as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2976d4c2f52f44e5b53f9ad92a5064bb 2024-11-18T20:21:47,849 INFO [M:0;5a964fc427ed:37233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2976d4c2f52f44e5b53f9ad92a5064bb, entries=8, sequenceid=59, filesize=5.5 K 2024-11-18T20:21:47,851 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/49c1c7d7706549769cc8344f509ab283 as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/49c1c7d7706549769cc8344f509ab283 2024-11-18T20:21:47,860 INFO [M:0;5a964fc427ed:37233 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 49c1c7d7706549769cc8344f509ab283 2024-11-18T20:21:47,860 INFO [M:0;5a964fc427ed:37233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/49c1c7d7706549769cc8344f509ab283, entries=6, sequenceid=59, filesize=6.1 K 2024-11-18T20:21:47,862 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ded6e7720f541e784d04a983e5df226 as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4ded6e7720f541e784d04a983e5df226 2024-11-18T20:21:47,873 INFO [M:0;5a964fc427ed:37233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4ded6e7720f541e784d04a983e5df226, entries=1, sequenceid=59, filesize=5.0 K 2024-11-18T20:21:47,875 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0421487e5ba746829619e840248ac102 as hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0421487e5ba746829619e840248ac102 2024-11-18T20:21:47,884 INFO [M:0;5a964fc427ed:37233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0421487e5ba746829619e840248ac102, entries=1, sequenceid=59, filesize=4.9 K 2024-11-18T20:21:47,886 INFO [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1013ms, sequenceid=59, compaction requested=false 2024-11-18T20:21:47,888 INFO [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:47,888 DEBUG [M:0;5a964fc427ed:37233 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961306872Disabling compacts and flushes for region at 1731961306872Disabling writes for close at 1731961306873 (+1 ms)Obtaining lock to block concurrent updates at 1731961306873Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961306873Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731961306874 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731961306875 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961306876 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961306895 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961306895Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961307320 (+425 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961307343 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961307343Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961307764 (+421 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961307779 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961307779Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961307794 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961307810 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961307810Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52484bb0: reopening flushed file at 1731961307832 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e1870c1: reopening flushed file at 1731961307849 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@503e3278: reopening flushed file at 1731961307861 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63405191: reopening flushed file at 1731961307874 (+13 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1013ms, sequenceid=59, compaction requested=false at 1731961307886 (+12 ms)Writing region close event to WAL at 1731961307888 (+2 ms)Closed at 1731961307888 2024-11-18T20:21:47,889 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:47,889 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:47,889 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:47,889 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:47,889 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:47,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43521 is added to blk_1073741830_1006 (size=27973) 2024-11-18T20:21:47,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39627 is added to blk_1073741830_1006 (size=27973) 2024-11-18T20:21:47,893 INFO [M:0;5a964fc427ed:37233 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:21:47,893 INFO [M:0;5a964fc427ed:37233 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37233 2024-11-18T20:21:47,893 INFO [M:0;5a964fc427ed:37233 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:21:47,893 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
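The "Region close journal" records above print each close-phase milestone with its absolute timestamp plus a "(+N ms)" delta from the previous step. A small sketch of that journal-with-deltas formatting (illustrative only, not HBase's actual journal class):

import java.util.ArrayList;
import java.util.List;

// Sketch only: record named milestones with timestamps and render them as
// "event at <ts>" followed by "(+N ms)" deltas, like the close journal above.
public class CloseJournalSketch {
  private static final class Entry {
    final String event; final long ts;
    Entry(String event, long ts) { this.event = event; this.ts = ts; }
  }

  private final List<Entry> entries = new ArrayList<>();

  public void add(String event) { entries.add(new Entry(event, System.currentTimeMillis())); }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < entries.size(); i++) {
      Entry e = entries.get(i);
      sb.append(e.event).append(" at ").append(e.ts);
      if (i > 0) {
        long delta = e.ts - entries.get(i - 1).ts;
        if (delta > 0) {
          sb.append(" (+").append(delta).append(" ms)");
        }
      }
    }
    return sb.toString();
  }
}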
2024-11-18T20:21:48,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:21:48,002 INFO [M:0;5a964fc427ed:37233 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:21:48,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37233-0x10150c899f60000, quorum=127.0.0.1:55888, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:21:48,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:48,010 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:21:48,010 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:21:48,010 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:21:48,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir/,STOPPED} 2024-11-18T20:21:48,015 WARN [BP-489932828-172.17.0.2-1731961210518 heartbeating to localhost/127.0.0.1:37701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:21:48,015 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:21:48,015 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:21:48,015 WARN [BP-489932828-172.17.0.2-1731961210518 heartbeating to localhost/127.0.0.1:37701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-489932828-172.17.0.2-1731961210518 (Datanode Uuid 1a7fd30e-06b3-4141-9e41-761750e8e4bc) service to localhost/127.0.0.1:37701 2024-11-18T20:21:48,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data3/current/BP-489932828-172.17.0.2-1731961210518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:48,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data4/current/BP-489932828-172.17.0.2-1731961210518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:48,018 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:21:48,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:48,020 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:21:48,020 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:21:48,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:21:48,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir/,STOPPED} 2024-11-18T20:21:48,022 WARN [BP-489932828-172.17.0.2-1731961210518 heartbeating to localhost/127.0.0.1:37701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:21:48,022 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
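The remaining records (datanode block pool services ending here, then the namenode web handlers, the MiniZK cluster and finally "Minicluster is down" below) are the test's teardown of the single-process cluster. A sketch of the JUnit lifecycle that typically drives this, assuming the HBaseTestingUtil start/shutdown methods used by these tests (class and method names taken from the log and from common HBase test usage, not verified against this exact test):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// Sketch only: the usual mini-cluster lifecycle around a test like testSlowSyncLogRolling.
// Shutting the utility down stops the regionserver(s) and master, then MiniZK and MiniDFS,
// producing the cascade of shutdown records seen in this log.
public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(1);       // 1 master + 1 regionserver, backed by MiniDFS + MiniZK
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();     // "Minicluster is down" once everything has stopped
  }
}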
2024-11-18T20:21:48,022 WARN [BP-489932828-172.17.0.2-1731961210518 heartbeating to localhost/127.0.0.1:37701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-489932828-172.17.0.2-1731961210518 (Datanode Uuid dcb5832a-ce8d-4cae-8f01-c13c366be491) service to localhost/127.0.0.1:37701 2024-11-18T20:21:48,022 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:21:48,023 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data1/current/BP-489932828-172.17.0.2-1731961210518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:48,023 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/cluster_7d761752-b0e6-23b7-8e41-dacf9d8650e1/data/data2/current/BP-489932828-172.17.0.2-1731961210518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:48,024 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:21:48,032 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:21:48,033 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:21:48,033 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:21:48,033 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:21:48,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir/,STOPPED} 2024-11-18T20:21:48,045 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:21:48,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:21:48,098 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37701 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37701 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@4b736004 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) 
app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:37701 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/5a964fc427ed:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
master/5a964fc427ed:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37701 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/5a964fc427ed:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37701 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37701 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:37701 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37701 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=648 (was 1030), ProcessCount=11 (was 11), AvailableMemoryMB=4105 (was 5003) 2024-11-18T20:21:48,105 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=648, ProcessCount=11, AvailableMemoryMB=4105 2024-11-18T20:21:48,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:21:48,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.log.dir so I do NOT create it in target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de 2024-11-18T20:21:48,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfedb1f2-6895-7bdd-b8c6-03632ac9e5d4/hadoop.tmp.dir so I do NOT create it in target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de 2024-11-18T20:21:48,106 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a, deleteOnExit=true 2024-11-18T20:21:48,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:21:48,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/test.cache.data in system properties and HBase conf 2024-11-18T20:21:48,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:21:48,107 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:21:48,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:21:48,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:21:48,124 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:21:48,344 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:48,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:21:48,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:21:48,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:21:48,364 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:21:48,365 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:48,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:21:48,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:21:48,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d95bc23{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/java.io.tmpdir/jetty-localhost-39609-hadoop-hdfs-3_4_1-tests_jar-_-any-4120616264101552271/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:21:48,480 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:39609} 2024-11-18T20:21:48,480 INFO [Time-limited test {}] server.Server(415): Started @99972ms 2024-11-18T20:21:48,496 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:21:48,706 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:48,711 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:21:48,712 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:21:48,712 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:21:48,713 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:21:48,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:21:48,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:21:48,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d69c419{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/java.io.tmpdir/jetty-localhost-45855-hadoop-hdfs-3_4_1-tests_jar-_-any-4811835827480822266/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:48,852 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:45855} 2024-11-18T20:21:48,852 INFO [Time-limited test {}] server.Server(415): Started @100344ms 2024-11-18T20:21:48,855 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:21:48,920 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:48,925 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:21:48,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:21:48,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:21:48,927 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:21:48,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:21:48,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:21:49,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75434f63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/java.io.tmpdir/jetty-localhost-39165-hadoop-hdfs-3_4_1-tests_jar-_-any-3830568408674417805/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:49,034 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:39165} 2024-11-18T20:21:49,034 INFO [Time-limited test {}] server.Server(415): Started @100525ms 2024-11-18T20:21:49,036 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:21:49,496 WARN [Thread-444 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data1/current/BP-3853664-172.17.0.2-1731961308136/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:49,496 WARN [Thread-445 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data2/current/BP-3853664-172.17.0.2-1731961308136/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:49,528 WARN [Thread-409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:21:49,531 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7d9729ee5b03a92 with lease ID 0x9b84c1e0a61bf758: Processing first storage report for DS-d80e9977-8e0c-4ccf-80ae-8d3fe20cf6c8 from datanode DatanodeRegistration(127.0.0.1:43263, datanodeUuid=c9e45bf8-0759-4bb1-a88b-a4f2175a61a4, infoPort=45863, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136) 2024-11-18T20:21:49,531 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7d9729ee5b03a92 with lease ID 0x9b84c1e0a61bf758: from storage DS-d80e9977-8e0c-4ccf-80ae-8d3fe20cf6c8 node DatanodeRegistration(127.0.0.1:43263, datanodeUuid=c9e45bf8-0759-4bb1-a88b-a4f2175a61a4, infoPort=45863, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:21:49,532 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7d9729ee5b03a92 with lease ID 0x9b84c1e0a61bf758: Processing first storage report for DS-13a22df3-1bf9-41f9-9adf-5e380ec0de21 from datanode DatanodeRegistration(127.0.0.1:43263, datanodeUuid=c9e45bf8-0759-4bb1-a88b-a4f2175a61a4, infoPort=45863, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136) 2024-11-18T20:21:49,532 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7d9729ee5b03a92 with lease ID 0x9b84c1e0a61bf758: from storage DS-13a22df3-1bf9-41f9-9adf-5e380ec0de21 node DatanodeRegistration(127.0.0.1:43263, datanodeUuid=c9e45bf8-0759-4bb1-a88b-a4f2175a61a4, infoPort=45863, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:21:49,674 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data3/current/BP-3853664-172.17.0.2-1731961308136/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:49,674 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data4/current/BP-3853664-172.17.0.2-1731961308136/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:49,697 WARN [Thread-432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:21:49,699 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6417ca8eb78e586 with lease ID 0x9b84c1e0a61bf759: Processing first storage report for DS-221286a4-53a6-40fe-b6c4-fa10163c100c from datanode DatanodeRegistration(127.0.0.1:45549, datanodeUuid=fc2d3371-7c06-484e-aaf0-abb14835a751, infoPort=40843, infoSecurePort=0, ipcPort=41647, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136) 2024-11-18T20:21:49,699 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6417ca8eb78e586 with lease ID 0x9b84c1e0a61bf759: from storage DS-221286a4-53a6-40fe-b6c4-fa10163c100c node DatanodeRegistration(127.0.0.1:45549, datanodeUuid=fc2d3371-7c06-484e-aaf0-abb14835a751, infoPort=40843, infoSecurePort=0, ipcPort=41647, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:21:49,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6417ca8eb78e586 with lease ID 0x9b84c1e0a61bf759: Processing first storage report for DS-119715b0-bad8-46e6-8483-851285a377f2 from datanode DatanodeRegistration(127.0.0.1:45549, datanodeUuid=fc2d3371-7c06-484e-aaf0-abb14835a751, infoPort=40843, infoSecurePort=0, ipcPort=41647, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136) 2024-11-18T20:21:49,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6417ca8eb78e586 with lease ID 0x9b84c1e0a61bf759: from storage DS-119715b0-bad8-46e6-8483-851285a377f2 node DatanodeRegistration(127.0.0.1:45549, datanodeUuid=fc2d3371-7c06-484e-aaf0-abb14835a751, infoPort=40843, infoSecurePort=0, ipcPort=41647, storageInfo=lv=-57;cid=testClusterID;nsid=292146758;c=1731961308136), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:21:49,781 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de 2024-11-18T20:21:49,785 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/zookeeper_0, clientPort=58433, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:21:49,786 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58433 2024-11-18T20:21:49,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:49,789 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:49,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:21:49,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:21:49,807 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617 with version=8 2024-11-18T20:21:49,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase-staging 2024-11-18T20:21:49,810 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:21:49,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:49,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:49,810 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:21:49,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:49,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:21:49,810 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:21:49,811 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:21:49,811 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40271 2024-11-18T20:21:49,813 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40271 connecting to ZooKeeper ensemble=127.0.0.1:58433 2024-11-18T20:21:49,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402710x0, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:21:49,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40271-0x10150ca10010000 connected 2024-11-18T20:21:49,928 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:49,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:49,935 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:49,935 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617, hbase.cluster.distributed=false 2024-11-18T20:21:49,938 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:21:49,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40271 2024-11-18T20:21:49,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40271 2024-11-18T20:21:49,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40271 2024-11-18T20:21:49,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40271 2024-11-18T20:21:49,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40271 2024-11-18T20:21:49,969 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:21:49,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:49,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:49,969 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:21:49,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:49,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:21:49,969 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:21:49,970 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:21:49,971 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46641 2024-11-18T20:21:49,973 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46641 connecting to ZooKeeper ensemble=127.0.0.1:58433 2024-11-18T20:21:49,974 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:49,978 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:49,997 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:466410x0, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:21:49,999 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:466410x0, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:49,999 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46641-0x10150ca10010001 connected 2024-11-18T20:21:50,000 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:21:50,009 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:21:50,011 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:21:50,013 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:21:50,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46641 2024-11-18T20:21:50,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46641 2024-11-18T20:21:50,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46641 2024-11-18T20:21:50,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46641 2024-11-18T20:21:50,030 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46641 2024-11-18T20:21:50,044 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5a964fc427ed:40271 2024-11-18T20:21:50,045 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5a964fc427ed,40271,1731961309809 2024-11-18T20:21:50,053 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:50,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:50,053 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/5a964fc427ed,40271,1731961309809 2024-11-18T20:21:50,061 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:21:50,061 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,062 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:21:50,062 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5a964fc427ed,40271,1731961309809 from backup master directory 2024-11-18T20:21:50,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5a964fc427ed,40271,1731961309809 2024-11-18T20:21:50,069 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:50,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:50,070 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T20:21:50,070 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5a964fc427ed,40271,1731961309809 2024-11-18T20:21:50,077 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/hbase.id] with ID: 2ed2a2f1-a9b9-4eaa-a56d-67374bda8f16 2024-11-18T20:21:50,077 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/.tmp/hbase.id 2024-11-18T20:21:50,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:21:50,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:21:50,095 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/.tmp/hbase.id]:[hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/hbase.id] 2024-11-18T20:21:50,117 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:50,117 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:21:50,119 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-18T20:21:50,128 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:21:50,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:21:50,144 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:21:50,146 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:21:50,146 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:21:50,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:21:50,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:21:50,171 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store 2024-11-18T20:21:50,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:21:50,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:21:50,216 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:50,216 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:21:50,216 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:50,216 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:50,216 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:21:50,216 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:50,217 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:21:50,217 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961310216Disabling compacts and flushes for region at 1731961310216Disabling writes for close at 1731961310216Writing region close event to WAL at 1731961310216Closed at 1731961310216 2024-11-18T20:21:50,220 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/.initializing 2024-11-18T20:21:50,220 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/WALs/5a964fc427ed,40271,1731961309809 2024-11-18T20:21:50,225 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C40271%2C1731961309809, suffix=, logDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/WALs/5a964fc427ed,40271,1731961309809, archiveDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/oldWALs, maxLogs=10 2024-11-18T20:21:50,226 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C40271%2C1731961309809.1731961310225 2024-11-18T20:21:50,235 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/WALs/5a964fc427ed,40271,1731961309809/5a964fc427ed%2C40271%2C1731961309809.1731961310225 2024-11-18T20:21:50,236 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45863:45863),(127.0.0.1/127.0.0.1:40843:40843)] 2024-11-18T20:21:50,237 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:21:50,237 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:50,237 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,237 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,239 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,242 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:21:50,242 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:50,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:21:50,247 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,248 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:21:50,248 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,252 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:21:50,252 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:21:50,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:21:50,256 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:21:50,258 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,259 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,260 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,262 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,262 DEBUG [master/5a964fc427ed:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,263 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:21:50,267 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:50,272 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:21:50,273 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868005, jitterRate=0.10372616350650787}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:21:50,274 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961310238Initializing all the Stores at 1731961310239 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961310239Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961310239Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961310239Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961310239Cleaning up temporary data from old regions at 1731961310262 (+23 ms)Region opened successfully at 1731961310274 (+12 ms) 2024-11-18T20:21:50,275 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:21:50,281 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36bf0d36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:21:50,282 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:21:50,282 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:21:50,282 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:21:50,283 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:21:50,284 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-18T20:21:50,285 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:21:50,285 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:21:50,291 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:21:50,293 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:21:50,303 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:21:50,303 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:21:50,304 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:21:50,311 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:21:50,312 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:21:50,316 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:21:50,332 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:21:50,336 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:21:50,344 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:21:50,350 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:21:50,357 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:21:50,370 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:50,370 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:50,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,371 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5a964fc427ed,40271,1731961309809, sessionid=0x10150ca10010000, setting cluster-up flag (Was=false) 2024-11-18T20:21:50,386 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,421 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:21:50,423 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,40271,1731961309809 2024-11-18T20:21:50,457 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:50,482 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:21:50,483 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,40271,1731961309809 2024-11-18T20:21:50,485 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:21:50,488 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:50,488 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:21:50,488 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:21:50,489 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5a964fc427ed,40271,1731961309809 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5a964fc427ed:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:21:50,491 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961340492 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:21:50,493 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,494 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:50,494 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:21:50,494 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:21:50,494 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:21:50,494 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:21:50,494 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:21:50,495 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:21:50,495 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961310495,5,FailOnTimeoutGroup] 2024-11-18T20:21:50,495 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961310495,5,FailOnTimeoutGroup] 2024-11-18T20:21:50,495 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,495 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:21:50,495 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,495 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,496 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,496 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:21:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:21:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:21:50,512 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:21:50,513 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617 2024-11-18T20:21:50,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:21:50,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:21:50,534 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(746): ClusterId : 2ed2a2f1-a9b9-4eaa-a56d-67374bda8f16 2024-11-18T20:21:50,534 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:21:50,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:50,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:21:50,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:21:50,540 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:50,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:21:50,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:21:50,543 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:50,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:21:50,545 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:21:50,545 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:21:50,546 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:21:50,546 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:50,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:21:50,549 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:21:50,549 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:50,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:50,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:21:50,551 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740 2024-11-18T20:21:50,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740 2024-11-18T20:21:50,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:21:50,554 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:21:50,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:21:50,555 DEBUG [RS:0;5a964fc427ed:46641 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cc8daf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:21:50,555 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
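
The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, so the region falls back to memstore-flush-size divided by the number of families. A minimal sketch, assuming a hypothetical user table "t1" and an already-open Connection, of how that per-table attribute could be supplied through the TableDescriptorBuilder API (not how hbase:meta itself is built):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushLowerBoundSketch {
      // Sketch only: sets the per-table flush lower bound that the log notes is
      // absent for hbase:meta, so FlushLargeStoresPolicy would read it from the
      // descriptor instead of computing a fallback.
      static TableDescriptor descriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")) // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024)) // 16 MB, matching the fallback shown above
            .build();
      }

      static void createTable(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(descriptor());
        }
      }
    }
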
2024-11-18T20:21:50,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:21:50,560 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:21:50,561 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868862, jitterRate=0.10481546819210052}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:21:50,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961310535Initializing all the Stores at 1731961310536 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961310536Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961310537 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961310537Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961310537Cleaning up temporary data from old regions at 1731961310554 (+17 ms)Region opened successfully at 1731961310562 (+8 ms) 2024-11-18T20:21:50,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:21:50,563 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:21:50,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:21:50,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:21:50,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:21:50,564 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:21:50,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961310563Disabling compacts and flushes for region at 1731961310563Disabling writes for close at 1731961310563Writing 
region close event to WAL at 1731961310564 (+1 ms)Closed at 1731961310564 2024-11-18T20:21:50,567 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:50,567 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:21:50,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:21:50,570 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:21:50,572 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:21:50,578 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5a964fc427ed:46641 2024-11-18T20:21:50,578 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:21:50,579 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:21:50,579 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T20:21:50,580 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,40271,1731961309809 with port=46641, startcode=1731961309968 2024-11-18T20:21:50,580 DEBUG [RS:0;5a964fc427ed:46641 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:21:50,588 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49821, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:21:50,589 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40271 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,46641,1731961309968 2024-11-18T20:21:50,589 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40271 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,46641,1731961309968 2024-11-18T20:21:50,592 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617 2024-11-18T20:21:50,593 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42479 2024-11-18T20:21:50,593 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:21:50,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:21:50,611 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,46641,1731961309968] 2024-11-18T20:21:50,618 DEBUG [RS:0;5a964fc427ed:46641 {}] zookeeper.ZKUtil(111): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,46641,1731961309968 2024-11-18T20:21:50,618 WARN [RS:0;5a964fc427ed:46641 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:21:50,619 INFO [RS:0;5a964fc427ed:46641 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:21:50,619 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/WALs/5a964fc427ed,46641,1731961309968 2024-11-18T20:21:50,625 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:21:50,633 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:21:50,634 INFO [RS:0;5a964fc427ed:46641 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:21:50,634 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T20:21:50,635 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:21:50,637 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:21:50,637 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,638 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,638 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,638 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,638 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,638 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,638 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:21:50,638 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,639 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,639 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,639 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,639 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,639 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:50,639 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:21:50,639 DEBUG [RS:0;5a964fc427ed:46641 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:21:50,643 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
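
The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries come from chores registered with a ChoreService. A minimal sketch of that pattern, assuming a hypothetical chore name and period (the chores named in the log are HBase's own internal ones):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreSketch {
      static void run() {
        // Simple Stoppable the chore framework can consult for shutdown.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example"); // thread-name prefix
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) { // 1 s period
          @Override protected void chore() {
            // periodic work goes here
          }
        };
        service.scheduleChore(chore); // scheduling emits "is enabled." entries like those above
        // ... later ...
        stopper.stop("done");
        service.shutdown();
      }
    }
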
2024-11-18T20:21:50,643 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,643 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,643 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,643 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,643 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,46641,1731961309968-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:21:50,670 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:21:50,670 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,46641,1731961309968-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,671 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,671 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.Replication(171): 5a964fc427ed,46641,1731961309968 started 2024-11-18T20:21:50,692 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:50,692 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,46641,1731961309968, RpcServer on 5a964fc427ed/172.17.0.2:46641, sessionid=0x10150ca10010001 2024-11-18T20:21:50,692 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:21:50,692 DEBUG [RS:0;5a964fc427ed:46641 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,46641,1731961309968 2024-11-18T20:21:50,692 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,46641,1731961309968' 2024-11-18T20:21:50,693 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:21:50,693 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:21:50,694 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:21:50,694 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:21:50,694 DEBUG [RS:0;5a964fc427ed:46641 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5a964fc427ed,46641,1731961309968 2024-11-18T20:21:50,694 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,46641,1731961309968' 2024-11-18T20:21:50,694 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:21:50,695 DEBUG 
[RS:0;5a964fc427ed:46641 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:21:50,696 DEBUG [RS:0;5a964fc427ed:46641 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:21:50,696 INFO [RS:0;5a964fc427ed:46641 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:21:50,696 INFO [RS:0;5a964fc427ed:46641 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:21:50,723 WARN [5a964fc427ed:40271 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:21:50,799 INFO [RS:0;5a964fc427ed:46641 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C46641%2C1731961309968, suffix=, logDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/WALs/5a964fc427ed,46641,1731961309968, archiveDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/oldWALs, maxLogs=32 2024-11-18T20:21:50,802 INFO [RS:0;5a964fc427ed:46641 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C46641%2C1731961309968.1731961310801 2024-11-18T20:21:50,812 INFO [RS:0;5a964fc427ed:46641 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/WALs/5a964fc427ed,46641,1731961309968/5a964fc427ed%2C46641%2C1731961309968.1731961310801 2024-11-18T20:21:50,815 DEBUG [RS:0;5a964fc427ed:46641 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40843:40843),(127.0.0.1/127.0.0.1:45863:45863)] 2024-11-18T20:21:50,973 DEBUG [5a964fc427ed:40271 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:21:50,974 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5a964fc427ed,46641,1731961309968 2024-11-18T20:21:50,976 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,46641,1731961309968, state=OPENING 2024-11-18T20:21:51,164 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:21:51,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:51,217 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:51,218 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:21:51,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:51,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:51,218 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,46641,1731961309968}] 2024-11-18T20:21:51,372 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:21:51,376 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35487, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:21:51,381 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:21:51,382 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:21:51,384 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C46641%2C1731961309968.meta, suffix=.meta, logDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/WALs/5a964fc427ed,46641,1731961309968, archiveDir=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/oldWALs, maxLogs=32 2024-11-18T20:21:51,387 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C46641%2C1731961309968.meta.1731961311386.meta 2024-11-18T20:21:51,408 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/WALs/5a964fc427ed,46641,1731961309968/5a964fc427ed%2C46641%2C1731961309968.meta.1731961311386.meta 2024-11-18T20:21:51,410 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45863:45863),(127.0.0.1/127.0.0.1:40843:40843)] 2024-11-18T20:21:51,422 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:21:51,422 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:21:51,423 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:21:51,423 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
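
The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above reflect region-server WAL settings (here rollsize is half the block size). A short sketch of the commonly documented configuration keys behind those numbers; treat the exact key names as assumptions to verify against this 3.0.0-beta-2 build:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalRollingConfigSketch {
      // Values mirror the ones reported in the log above.
      static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                  // FSHLogProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L << 20); // 256 MB WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // roll at ~128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }
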
2024-11-18T20:21:51,423 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:21:51,423 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:51,423 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:21:51,423 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:21:51,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:21:51,428 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:21:51,428 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:51,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:51,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:21:51,431 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:21:51,431 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:51,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:51,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:21:51,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:21:51,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:51,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:51,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:21:51,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:21:51,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:51,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
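
The CompactionConfiguration entries above print the effective compaction tuning per column family (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0, and so on). A sketch of the usual Configuration keys that feed those values; key names are the commonly documented ones and should be checked against this build:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionConfigSketch {
      // Values copied from the log entries above, for illustration only.
      static Configuration compactionTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L << 20); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }
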
2024-11-18T20:21:51,436 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:21:51,438 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740 2024-11-18T20:21:51,443 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740 2024-11-18T20:21:51,445 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:21:51,445 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:21:51,446 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:21:51,448 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:21:51,450 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870331, jitterRate=0.10668377578258514}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:21:51,450 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:21:51,452 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961311424Writing region info on filesystem at 1731961311424Initializing all the Stores at 1731961311425 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961311426 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961311426Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961311426Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961311427 (+1 ms)Cleaning up temporary data from old regions at 1731961311445 (+18 ms)Running coprocessor post-open hooks at 1731961311450 (+5 ms)Region opened successfully at 1731961311451 (+1 ms) 2024-11-18T20:21:51,455 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961311372 2024-11-18T20:21:51,460 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:21:51,460 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:21:51,462 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,46641,1731961309968 2024-11-18T20:21:51,463 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,46641,1731961309968, state=OPEN 2024-11-18T20:21:51,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:21:51,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:21:52,008 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:21:52,019 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:21:52,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:21:52,773 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:21:52,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:21:52,773 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5a964fc427ed,46641,1731961309968 2024-11-18T20:21:52,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:52,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:52,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:21:52,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,46641,1731961309968 in 1.5550 sec 2024-11-18T20:21:52,782 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:21:52,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 2.2110 sec 2024-11-18T20:21:52,784 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:52,784 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:21:52,786 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:21:52,786 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,46641,1731961309968, seqNum=-1] 2024-11-18T20:21:52,787 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:21:52,789 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45745, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:21:52,798 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 2.3100 sec 2024-11-18T20:21:52,798 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961312798, completionTime=-1 2024-11-18T20:21:52,799 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), 
master is running 2024-11-18T20:21:52,799 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:21:52,801 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:21:52,801 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961372801 2024-11-18T20:21:52,801 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961432801 2024-11-18T20:21:52,802 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T20:21:52,802 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40271,1731961309809-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:52,802 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40271,1731961309809-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:52,802 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40271,1731961309809-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:52,802 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5a964fc427ed:40271, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:52,803 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:52,803 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:52,805 DEBUG [master/5a964fc427ed:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.738sec 2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40271,1731961309809-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:21:52,808 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40271,1731961309809-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:21:52,811 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:21:52,812 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:21:52,812 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,40271,1731961309809-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:52,835 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57927b81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:21:52,835 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5a964fc427ed,40271,-1 for getting cluster id 2024-11-18T20:21:52,835 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:21:52,837 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2ed2a2f1-a9b9-4eaa-a56d-67374bda8f16' 2024-11-18T20:21:52,838 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:21:52,838 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2ed2a2f1-a9b9-4eaa-a56d-67374bda8f16" 2024-11-18T20:21:52,838 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10fcfe5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:21:52,838 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5a964fc427ed,40271,-1] 2024-11-18T20:21:52,839 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:21:52,839 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:52,841 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:21:52,842 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa3a1e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:21:52,842 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:21:52,844 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,46641,1731961309968, seqNum=-1] 2024-11-18T20:21:52,844 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:21:52,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33358, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:21:52,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5a964fc427ed,40271,1731961309809 2024-11-18T20:21:52,849 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:52,852 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:21:52,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:21:52,853 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:21:52,853 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:21:52,853 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:52,853 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:52,853 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T20:21:52,853 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:21:52,853 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=571199657, stopped=false 2024-11-18T20:21:52,853 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5a964fc427ed,40271,1731961309809 2024-11-18T20:21:52,865 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:52,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:52,865 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:21:52,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:52,865 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:52,865 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
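The call stack above runs from TestLogRolling.testLogRollOnDatanodeDeath through AbstractTestLogRolling.tearDown into HBaseTestingUtil.shutdownMiniCluster, which closes the cached async connection and then brings the whole mini cluster down. A minimal sketch of that teardown pattern, assuming a JUnit 4 test class with a shared HBaseTestingUtil field named TEST_UTIL (illustrative only, not the actual AbstractTestLogRolling source):

    // Hedged sketch of the tearDown shape implied by the stack trace above.
    // TEST_UTIL is an assumed shared field; the real test class may organize this differently.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public abstract class LogRollingTearDownSketch {
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared connection, stops the single-process HBase cluster
        // (master + region server), then shuts down MiniDFS and MiniZooKeeper,
        // producing the "Shutting down minicluster" ... "Minicluster is down" lines seen here.
        TEST_UTIL.shutdownMiniCluster();
      }
    }
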
2024-11-18T20:21:52,865 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:21:52,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:52,866 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:52,866 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:52,866 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,46641,1731961309968' ***** 2024-11-18T20:21:52,866 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:21:52,866 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:21:52,866 INFO [RS:0;5a964fc427ed:46641 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:21:52,866 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:21:52,866 INFO [RS:0;5a964fc427ed:46641 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:21:52,866 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,46641,1731961309968 2024-11-18T20:21:52,866 INFO [RS:0;5a964fc427ed:46641 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:21:52,866 INFO [RS:0;5a964fc427ed:46641 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5a964fc427ed:46641. 2024-11-18T20:21:52,866 DEBUG [RS:0;5a964fc427ed:46641 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:21:52,867 DEBUG [RS:0;5a964fc427ed:46641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:52,867 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-18T20:21:52,867 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:21:52,867 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:21:52,867 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:21:52,867 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T20:21:52,867 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:21:52,867 DEBUG [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:21:52,867 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:21:52,867 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:21:52,867 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:21:52,867 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:21:52,867 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:21:52,868 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T20:21:52,885 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740/.tmp/ns/40c73cda908b4e1b9e2d668102e6bb34 is 43, key is default/ns:d/1731961312790/Put/seqid=0 2024-11-18T20:21:52,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741835_1011 (size=5153) 2024-11-18T20:21:52,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741835_1011 (size=5153) 2024-11-18T20:21:52,892 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740/.tmp/ns/40c73cda908b4e1b9e2d668102e6bb34 2024-11-18T20:21:52,903 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740/.tmp/ns/40c73cda908b4e1b9e2d668102e6bb34 as hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740/ns/40c73cda908b4e1b9e2d668102e6bb34 2024-11-18T20:21:52,912 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740/ns/40c73cda908b4e1b9e2d668102e6bb34, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T20:21:52,914 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false 2024-11-18T20:21:52,914 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:21:52,920 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T20:21:52,921 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:21:52,921 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:21:52,921 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961312867Running coprocessor pre-close hooks at 1731961312867Disabling compacts and flushes for region at 1731961312867Disabling writes for close at 1731961312867Obtaining lock to block concurrent updates at 1731961312868 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731961312868Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731961312868Flushing stores of hbase:meta,,1.1588230740 at 1731961312869 (+1 ms)Flushing 1588230740/ns: creating writer at 1731961312869Flushing 1588230740/ns: appending metadata at 1731961312885 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731961312885Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e936b25: reopening flushed file at 1731961312901 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false at 1731961312914 (+13 ms)Writing region close event to WAL at 1731961312915 (+1 ms)Running coprocessor post-close hooks at 1731961312921 (+6 ms)Closed at 1731961312921 2024-11-18T20:21:52,921 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:21:53,067 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,46641,1731961309968; all regions closed. 
2024-11-18T20:21:53,068 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,068 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,068 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:21:53,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:21:53,074 DEBUG [RS:0;5a964fc427ed:46641 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/oldWALs 2024-11-18T20:21:53,074 INFO [RS:0;5a964fc427ed:46641 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C46641%2C1731961309968.meta:.meta(num 1731961311386) 2024-11-18T20:21:53,074 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,075 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,075 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,075 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,075 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:21:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:21:53,079 DEBUG [RS:0;5a964fc427ed:46641 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/oldWALs 2024-11-18T20:21:53,079 INFO [RS:0;5a964fc427ed:46641 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C46641%2C1731961309968:(num 1731961310801) 2024-11-18T20:21:53,079 DEBUG [RS:0;5a964fc427ed:46641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:53,079 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:21:53,080 INFO [RS:0;5a964fc427ed:46641 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:21:53,080 INFO [RS:0;5a964fc427ed:46641 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:21:53,080 INFO [RS:0;5a964fc427ed:46641 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:21:53,080 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:21:53,080 INFO [RS:0;5a964fc427ed:46641 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46641 2024-11-18T20:21:53,090 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,46641,1731961309968 2024-11-18T20:21:53,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:21:53,090 INFO [RS:0;5a964fc427ed:46641 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:21:53,091 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,46641,1731961309968] 2024-11-18T20:21:53,107 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,46641,1731961309968 already deleted, retry=false 2024-11-18T20:21:53,107 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,46641,1731961309968 expired; onlineServers=0 2024-11-18T20:21:53,107 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5a964fc427ed,40271,1731961309809' ***** 2024-11-18T20:21:53,107 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:21:53,107 INFO [M:0;5a964fc427ed:40271 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:21:53,107 INFO [M:0;5a964fc427ed:40271 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:21:53,107 DEBUG [M:0;5a964fc427ed:40271 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:21:53,108 DEBUG [M:0;5a964fc427ed:40271 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:21:53,108 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:21:53,108 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961310495 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961310495,5,FailOnTimeoutGroup] 2024-11-18T20:21:53,108 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961310495 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961310495,5,FailOnTimeoutGroup] 2024-11-18T20:21:53,108 INFO [M:0;5a964fc427ed:40271 {}] hbase.ChoreService(370): Chore service for: master/5a964fc427ed:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:21:53,108 INFO [M:0;5a964fc427ed:40271 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:21:53,108 DEBUG [M:0;5a964fc427ed:40271 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:21:53,108 INFO [M:0;5a964fc427ed:40271 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:21:53,109 INFO [M:0;5a964fc427ed:40271 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:21:53,109 INFO [M:0;5a964fc427ed:40271 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:21:53,109 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:21:53,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:21:53,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:53,115 DEBUG [M:0;5a964fc427ed:40271 {}] zookeeper.ZKUtil(347): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:21:53,115 WARN [M:0;5a964fc427ed:40271 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:21:53,117 INFO [M:0;5a964fc427ed:40271 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/.lastflushedseqids 2024-11-18T20:21:53,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741836_1012 (size=99) 2024-11-18T20:21:53,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741836_1012 (size=99) 2024-11-18T20:21:53,128 INFO [M:0;5a964fc427ed:40271 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:21:53,128 INFO [M:0;5a964fc427ed:40271 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:21:53,128 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:21:53,128 INFO [M:0;5a964fc427ed:40271 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:53,128 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:53,128 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:21:53,128 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:53,128 INFO [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T20:21:53,144 DEBUG [M:0;5a964fc427ed:40271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6accf08bcc3344389ca2d8031e9d366a is 82, key is hbase:meta,,1/info:regioninfo/1731961311462/Put/seqid=0 2024-11-18T20:21:53,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741837_1013 (size=5672) 2024-11-18T20:21:53,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741837_1013 (size=5672) 2024-11-18T20:21:53,150 INFO [M:0;5a964fc427ed:40271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6accf08bcc3344389ca2d8031e9d366a 2024-11-18T20:21:53,177 DEBUG [M:0;5a964fc427ed:40271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b6ee3e6c14b045f8820f35ae5428c578 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731961312797/Put/seqid=0 2024-11-18T20:21:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741838_1014 (size=5275) 2024-11-18T20:21:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741838_1014 (size=5275) 2024-11-18T20:21:53,184 INFO [M:0;5a964fc427ed:40271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b6ee3e6c14b045f8820f35ae5428c578 2024-11-18T20:21:53,198 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:21:53,198 INFO [RS:0;5a964fc427ed:46641 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:21:53,199 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46641-0x10150ca10010001, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-18T20:21:53,199 INFO [RS:0;5a964fc427ed:46641 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,46641,1731961309968; zookeeper connection closed. 2024-11-18T20:21:53,199 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9b6063e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9b6063e 2024-11-18T20:21:53,199 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:21:53,206 DEBUG [M:0;5a964fc427ed:40271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/feb98688fbbb421cbbe67b1c4574e813 is 69, key is 5a964fc427ed,46641,1731961309968/rs:state/1731961310589/Put/seqid=0 2024-11-18T20:21:53,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741839_1015 (size=5156) 2024-11-18T20:21:53,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741839_1015 (size=5156) 2024-11-18T20:21:53,213 INFO [M:0;5a964fc427ed:40271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/feb98688fbbb421cbbe67b1c4574e813 2024-11-18T20:21:53,234 DEBUG [M:0;5a964fc427ed:40271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/548cbb3fa5914e1a84cc1a8f00ef6357 is 52, key is load_balancer_on/state:d/1731961312851/Put/seqid=0 2024-11-18T20:21:53,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741840_1016 (size=5056) 2024-11-18T20:21:53,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741840_1016 (size=5056) 2024-11-18T20:21:53,243 INFO [M:0;5a964fc427ed:40271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/548cbb3fa5914e1a84cc1a8f00ef6357 2024-11-18T20:21:53,250 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6accf08bcc3344389ca2d8031e9d366a as hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6accf08bcc3344389ca2d8031e9d366a 2024-11-18T20:21:53,257 INFO [M:0;5a964fc427ed:40271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6accf08bcc3344389ca2d8031e9d366a, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T20:21:53,258 DEBUG 
[M:0;5a964fc427ed:40271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b6ee3e6c14b045f8820f35ae5428c578 as hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b6ee3e6c14b045f8820f35ae5428c578 2024-11-18T20:21:53,265 INFO [M:0;5a964fc427ed:40271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b6ee3e6c14b045f8820f35ae5428c578, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T20:21:53,266 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/feb98688fbbb421cbbe67b1c4574e813 as hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/feb98688fbbb421cbbe67b1c4574e813 2024-11-18T20:21:53,273 INFO [M:0;5a964fc427ed:40271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/feb98688fbbb421cbbe67b1c4574e813, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T20:21:53,274 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/548cbb3fa5914e1a84cc1a8f00ef6357 as hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/548cbb3fa5914e1a84cc1a8f00ef6357 2024-11-18T20:21:53,280 INFO [M:0;5a964fc427ed:40271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42479/user/jenkins/test-data/6a4e176e-398a-f074-6c4e-40d44eeda617/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/548cbb3fa5914e1a84cc1a8f00ef6357, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T20:21:53,282 INFO [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=29, compaction requested=false 2024-11-18T20:21:53,283 INFO [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:53,283 DEBUG [M:0;5a964fc427ed:40271 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961313128Disabling compacts and flushes for region at 1731961313128Disabling writes for close at 1731961313128Obtaining lock to block concurrent updates at 1731961313128Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961313128Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731961313129 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731961313129Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961313129Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961313144 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961313144Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961313157 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961313177 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961313177Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961313190 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961313206 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961313206Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961313219 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961313234 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961313234Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b49c8c9: reopening flushed file at 1731961313249 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d69f206: reopening flushed file at 1731961313257 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f72ee2a: reopening flushed file at 1731961313265 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@269f295d: reopening flushed file at 1731961313273 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=29, compaction requested=false at 1731961313282 (+9 ms)Writing region close event to WAL at 1731961313283 (+1 ms)Closed at 1731961313283 2024-11-18T20:21:53,283 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,283 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,284 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,284 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,284 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:21:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45549 is added to blk_1073741830_1006 (size=10311) 2024-11-18T20:21:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43263 is added to blk_1073741830_1006 (size=10311) 2024-11-18T20:21:53,287 INFO [M:0;5a964fc427ed:40271 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:21:53,287 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:21:53,287 INFO [M:0;5a964fc427ed:40271 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40271 2024-11-18T20:21:53,287 INFO [M:0;5a964fc427ed:40271 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:21:53,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:21:53,394 INFO [M:0;5a964fc427ed:40271 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:21:53,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40271-0x10150ca10010000, quorum=127.0.0.1:58433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:21:53,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75434f63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:53,399 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:21:53,399 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:21:53,399 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:21:53,399 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir/,STOPPED} 2024-11-18T20:21:53,401 WARN [BP-3853664-172.17.0.2-1731961308136 heartbeating to localhost/127.0.0.1:42479 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:21:53,401 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:21:53,401 WARN [BP-3853664-172.17.0.2-1731961308136 heartbeating to localhost/127.0.0.1:42479 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-3853664-172.17.0.2-1731961308136 (Datanode Uuid fc2d3371-7c06-484e-aaf0-abb14835a751) service to localhost/127.0.0.1:42479 2024-11-18T20:21:53,401 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:21:53,401 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data3/current/BP-3853664-172.17.0.2-1731961308136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:53,402 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data4/current/BP-3853664-172.17.0.2-1731961308136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:53,402 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:21:53,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d69c419{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:53,405 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:21:53,406 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:21:53,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:21:53,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir/,STOPPED} 2024-11-18T20:21:53,408 WARN [BP-3853664-172.17.0.2-1731961308136 heartbeating to localhost/127.0.0.1:42479 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:21:53,408 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:21:53,408 WARN [BP-3853664-172.17.0.2-1731961308136 heartbeating to localhost/127.0.0.1:42479 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-3853664-172.17.0.2-1731961308136 (Datanode Uuid c9e45bf8-0759-4bb1-a88b-a4f2175a61a4) service to localhost/127.0.0.1:42479 2024-11-18T20:21:53,408 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:21:53,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data1/current/BP-3853664-172.17.0.2-1731961308136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:53,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/cluster_d4020fe5-de2a-0486-a1e8-3d594434698a/data/data2/current/BP-3853664-172.17.0.2-1731961308136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:21:53,409 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:21:53,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d95bc23{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:21:53,415 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:21:53,415 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:21:53,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:21:53,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir/,STOPPED} 2024-11-18T20:21:53,423 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:21:53,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:21:53,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:21:53,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.log.dir so I do NOT create it in target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1 2024-11-18T20:21:53,448 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16ae73f6-4606-df9c-3b62-6a9d3f7727de/hadoop.tmp.dir so I do NOT create it in target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1 2024-11-18T20:21:53,448 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4, deleteOnExit=true 2024-11-18T20:21:53,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:21:53,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/test.cache.data in system properties and HBase conf 2024-11-18T20:21:53,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:21:53,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:21:53,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:21:53,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:21:53,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:21:53,449 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T20:21:53,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:21:53,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:21:53,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:21:53,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:21:53,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:21:53,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:21:53,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:21:53,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:21:53,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:21:53,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:21:53,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:21:53,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:21:53,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:21:53,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:21:53,468 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:21:53,678 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:53,684 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:21:53,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:21:53,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:21:53,685 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:21:53,686 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:53,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:21:53,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3150e6db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:21:53,787 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2606b08f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir/jetty-localhost-33043-hadoop-hdfs-3_4_1-tests_jar-_-any-5856003597848248458/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:21:53,788 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c053989{HTTP/1.1, (http/1.1)}{localhost:33043} 2024-11-18T20:21:53,788 INFO [Time-limited test {}] server.Server(415): Started @105279ms 2024-11-18T20:21:53,801 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:21:53,979 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:53,982 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:21:53,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:21:53,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:21:53,983 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:21:53,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cb9bebc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:21:53,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf32f74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:21:54,077 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c77eea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir/jetty-localhost-43629-hadoop-hdfs-3_4_1-tests_jar-_-any-13697799993455923427/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:54,077 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e20426d{HTTP/1.1, (http/1.1)}{localhost:43629} 2024-11-18T20:21:54,078 INFO [Time-limited test {}] server.Server(415): Started @105569ms 2024-11-18T20:21:54,079 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:21:54,107 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:21:54,111 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:21:54,112 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:21:54,112 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:21:54,112 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:21:54,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f7f19bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:21:54,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32403ac6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:21:54,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6cd7b3e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir/jetty-localhost-41201-hadoop-hdfs-3_4_1-tests_jar-_-any-7684755329323146649/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:21:54,207 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ff95875{HTTP/1.1, (http/1.1)}{localhost:41201} 2024-11-18T20:21:54,207 INFO [Time-limited test {}] server.Server(415): Started @105698ms 2024-11-18T20:21:54,208 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:21:54,623 WARN [Thread-664 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data1/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:54,623 WARN [Thread-665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data2/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:54,641 WARN [Thread-629 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:21:54,646 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98ab646f34a3aa3c with lease ID 0x3ead2f9b91647e8e: Processing first storage report for DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838 from datanode DatanodeRegistration(127.0.0.1:41917, datanodeUuid=37be7c4f-1f71-471c-8a95-e3131df48058, infoPort=40299, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:21:54,646 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98ab646f34a3aa3c with lease ID 0x3ead2f9b91647e8e: from storage DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838 node DatanodeRegistration(127.0.0.1:41917, datanodeUuid=37be7c4f-1f71-471c-8a95-e3131df48058, infoPort=40299, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:21:54,646 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98ab646f34a3aa3c with lease ID 0x3ead2f9b91647e8e: Processing first storage report for DS-90a44a7f-4a22-4493-981d-4f4fd0f2428c from datanode DatanodeRegistration(127.0.0.1:41917, datanodeUuid=37be7c4f-1f71-471c-8a95-e3131df48058, infoPort=40299, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:21:54,646 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98ab646f34a3aa3c with lease ID 0x3ead2f9b91647e8e: from storage DS-90a44a7f-4a22-4493-981d-4f4fd0f2428c node DatanodeRegistration(127.0.0.1:41917, datanodeUuid=37be7c4f-1f71-471c-8a95-e3131df48058, infoPort=40299, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:21:54,647 INFO [regionserver/5a964fc427ed:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:21:54,788 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data4/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:54,788 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data3/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:21:54,815 WARN [Thread-652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:21:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92cb3770b48438b5 with lease ID 0x3ead2f9b91647e8f: Processing first storage report for DS-752959bd-3814-4dce-8848-a7add3965661 from datanode DatanodeRegistration(127.0.0.1:36171, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=45167, infoSecurePort=0, ipcPort=36691, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:21:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92cb3770b48438b5 with lease ID 0x3ead2f9b91647e8f: from storage DS-752959bd-3814-4dce-8848-a7add3965661 node DatanodeRegistration(127.0.0.1:36171, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=45167, infoSecurePort=0, ipcPort=36691, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:21:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92cb3770b48438b5 with lease ID 0x3ead2f9b91647e8f: Processing first storage report for DS-4a13fe2d-9a3d-4aca-894a-3df24200b229 from datanode DatanodeRegistration(127.0.0.1:36171, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=45167, infoSecurePort=0, ipcPort=36691, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:21:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92cb3770b48438b5 with lease ID 0x3ead2f9b91647e8f: from storage DS-4a13fe2d-9a3d-4aca-894a-3df24200b229 node DatanodeRegistration(127.0.0.1:36171, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=45167, infoSecurePort=0, ipcPort=36691, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:21:54,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1 2024-11-18T20:21:54,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/zookeeper_0, clientPort=58477, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:21:54,845 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58477 2024-11-18T20:21:54,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:54,846 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:54,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:21:54,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:21:54,857 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf with version=8 2024-11-18T20:21:54,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase-staging 2024-11-18T20:21:54,859 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:21:54,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:54,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:54,860 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:21:54,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:54,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:21:54,860 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:21:54,860 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:21:54,861 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37903 2024-11-18T20:21:54,862 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37903 connecting to ZooKeeper ensemble=127.0.0.1:58477 2024-11-18T20:21:54,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379030x0, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:21:54,903 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37903-0x10150ca23c40000 connected 2024-11-18T20:21:54,968 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:54,970 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:54,972 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:54,972 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf, hbase.cluster.distributed=false 2024-11-18T20:21:54,974 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:21:54,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37903 2024-11-18T20:21:54,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37903 2024-11-18T20:21:54,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37903 2024-11-18T20:21:54,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37903 2024-11-18T20:21:54,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37903 2024-11-18T20:21:54,990 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:21:54,990 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:54,990 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:54,990 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:21:54,990 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:54,990 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:21:54,990 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:21:54,991 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:21:54,991 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38299 2024-11-18T20:21:54,993 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38299 connecting to ZooKeeper ensemble=127.0.0.1:58477 2024-11-18T20:21:54,993 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:54,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:55,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382990x0, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:21:55,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:382990x0, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:21:55,002 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38299-0x10150ca23c40001 connected 2024-11-18T20:21:55,002 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:21:55,003 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:21:55,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:21:55,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:21:55,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38299 2024-11-18T20:21:55,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38299 2024-11-18T20:21:55,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38299 2024-11-18T20:21:55,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38299 2024-11-18T20:21:55,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38299 2024-11-18T20:21:55,022 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5a964fc427ed:37903 2024-11-18T20:21:55,023 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5a964fc427ed,37903,1731961314859 2024-11-18T20:21:55,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:55,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:55,034 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/5a964fc427ed,37903,1731961314859 2024-11-18T20:21:55,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:21:55,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,044 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:21:55,044 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5a964fc427ed,37903,1731961314859 from backup master directory 2024-11-18T20:21:55,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:55,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5a964fc427ed,37903,1731961314859 2024-11-18T20:21:55,051 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
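[Editor's note] The entries above show the master registering itself under /hbase/backup-masters and both processes setting watches on znodes such as /hbase/master and /hbase/running before those znodes exist. For readers following the log, here is a minimal, illustrative sketch of that watch-before-create pattern using the plain ZooKeeper client; the quorum address and znode paths are taken from the log, but this is not HBase's internal ZKWatcher code and connecting a second client like this is purely for observation.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address as reported by the log (MiniZooKeeperCluster on clientPort=58477).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58477", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // Watch znodes that may not exist yet, mirroring
    // "Set watcher on znode that does not yet exist, /hbase/master" in the log.
    Watcher creationWatcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("znode created: " + event.getPath());
      }
    };
    zk.exists("/hbase/master", creationWatcher);
    zk.exists("/hbase/running", creationWatcher);

    Thread.sleep(5_000); // keep the session alive long enough to observe the events
    zk.close();
  }
}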
2024-11-18T20:21:55,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:21:55,052 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5a964fc427ed,37903,1731961314859 2024-11-18T20:21:55,056 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/hbase.id] with ID: 8e3c65a6-53a5-4b12-9d35-9a0ec6eb6ff7 2024-11-18T20:21:55,056 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/.tmp/hbase.id 2024-11-18T20:21:55,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:21:55,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:21:55,065 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/.tmp/hbase.id]:[hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/hbase.id] 2024-11-18T20:21:55,078 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:55,079 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:21:55,080 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
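[Editor's note] The cluster ID above is first written to a temporary path (.tmp/hbase.id) and then moved to its final location, a standard write-then-rename pattern that makes the file appear atomically to readers. Below is a minimal sketch of that pattern with the Hadoop FileSystem API; the NameNode address and paths echo the log, but this is not the FSUtils implementation, and the real hbase.id stores a serialized ClusterId rather than the plain string used here.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address as reported by the test log.
    conf.set("fs.defaultFS", "hdfs://localhost:33409");
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf");
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // 1. Write the ID to a temporary file first.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write("8e3c65a6-53a5-4b12-9d35-9a0ec6eb6ff7".getBytes(StandardCharsets.UTF_8));
    }
    // 2. Rename into place so readers never observe a partially written file.
    if (!fs.rename(tmpId, finalId)) {
      throw new IllegalStateException("rename failed for " + tmpId);
    }
    System.out.println("cluster id file at " + finalId);
  }
}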
2024-11-18T20:21:55,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:21:55,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:21:55,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:21:55,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:21:55,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:21:55,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:21:55,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:21:55,146 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store 2024-11-18T20:21:55,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:21:55,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:21:55,154 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:55,154 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:21:55,154 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:55,154 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:55,154 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:21:55,154 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:21:55,154 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
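[Editor's note] The master:store descriptor logged above defines four column families (info, proc, rs, state) with per-family versions, block sizes, bloom filters and encodings. MasterRegion builds that descriptor internally, but the same shape can be expressed through the public client API; the sketch below approximates just the 'info' and 'proc' families from the logged attributes and is illustrative only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // 'info' family: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // 'proc' family: single version, default encoding, ROW bloom, 64 KB blocks.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}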
2024-11-18T20:21:55,154 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961315154Disabling compacts and flushes for region at 1731961315154Disabling writes for close at 1731961315154Writing region close event to WAL at 1731961315154Closed at 1731961315154 2024-11-18T20:21:55,155 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/.initializing 2024-11-18T20:21:55,155 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859 2024-11-18T20:21:55,159 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C37903%2C1731961314859, suffix=, logDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859, archiveDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/oldWALs, maxLogs=10 2024-11-18T20:21:55,159 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37903%2C1731961314859.1731961315159 2024-11-18T20:21:55,165 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 2024-11-18T20:21:55,166 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45167:45167),(127.0.0.1/127.0.0.1:40299:40299)] 2024-11-18T20:21:55,166 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:21:55,166 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:55,167 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,167 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:21:55,173 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:55,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:21:55,175 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:21:55,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:21:55,177 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:21:55,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:21:55,180 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:21:55,181 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,181 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,182 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,183 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,183 DEBUG [master/5a964fc427ed:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,184 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:21:55,185 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:21:55,187 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:21:55,188 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876815, jitterRate=0.11492888629436493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:21:55,189 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961315167Initializing all the Stores at 1731961315168 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961315168Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961315170 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961315170Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961315171 (+1 ms)Cleaning up temporary data from old regions at 1731961315183 (+12 ms)Region opened successfully at 1731961315188 (+5 ms) 2024-11-18T20:21:55,189 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:21:55,193 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57418543, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:21:55,194 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:21:55,194 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:21:55,194 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:21:55,194 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:21:55,195 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:21:55,195 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:21:55,195 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:21:55,197 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:21:55,199 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:21:55,210 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:21:55,210 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:21:55,211 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:21:55,218 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:21:55,218 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:21:55,220 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:21:55,249 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:21:55,250 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:21:55,260 
DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:21:55,262 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:21:55,304 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:21:55,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:55,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:21:55,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,319 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5a964fc427ed,37903,1731961314859, sessionid=0x10150ca23c40000, setting cluster-up flag (Was=false) 2024-11-18T20:21:55,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,360 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:21:55,362 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,37903,1731961314859 2024-11-18T20:21:55,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:21:55,393 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:21:55,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:21:55,402 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:21:55,405 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,37903,1731961314859 2024-11-18T20:21:55,408 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:21:55,410 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:55,411 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:21:55,411 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
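[Editor's note] Everything logged so far, mini DFS, mini ZooKeeper, one master and one region server, is driven by HBaseTestingUtil, the class the log names repeatedly. A minimal test-side sketch of that lifecycle follows; the method names are assumed to mirror the long-standing HBaseTestingUtility surface (startMiniCluster/shutdownMiniCluster/getAdmin), and the table created is purely illustrative.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    try {
      // Assumed to start mini DFS, mini ZooKeeper, one master and one region server,
      // producing roughly the startup sequence seen in this log.
      util.startMiniCluster();
      try (Admin admin = util.getAdmin()) {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("sketch_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build());
      }
    } finally {
      util.shutdownMiniCluster();
    }
  }
}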
2024-11-18T20:21:55,411 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5a964fc427ed,37903,1731961314859 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:21:55,412 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(746): ClusterId : 8e3c65a6-53a5-4b12-9d35-9a0ec6eb6ff7 2024-11-18T20:21:55,412 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5a964fc427ed:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:21:55,413 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961345414 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,414 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:21:55,415 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:21:55,415 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:21:55,415 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:55,415 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:21:55,415 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:21:55,415 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:21:55,415 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961315415,5,FailOnTimeoutGroup] 2024-11-18T20:21:55,415 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961315415,5,FailOnTimeoutGroup] 2024-11-18T20:21:55,415 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,415 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:21:55,416 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,416 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
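Editor's note: the cleaner and janitor threads above (LogsCleaner and HFileCleaner at 600000 ms, SnapshotCleaner at 1800000 ms, ReplicationBarrierCleaner at 43200000 ms) are all ScheduledChore instances registered with the master's ChoreService. A rough sketch of that pattern follows, assuming the ScheduledChore and ChoreService constructors of recent HBase releases; the chore name and period here are made up for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {   // trivial stopper, just for the sketch
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore chore = new ScheduledChore("ExampleCleaner", stopper, 600_000) {
          @Override protected void chore() {
            // periodic work goes here, e.g. deleting expired files
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("example");  // core thread pool prefix
        service.scheduleChore(chore);  // emits the "Chore ScheduledChore name=... is enabled." line seen above
        // service.shutdown() when the test is done
      }
    }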
2024-11-18T20:21:55,416 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,416 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:21:55,419 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:21:55,419 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:21:55,450 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:21:55,450 DEBUG [RS:0;5a964fc427ed:38299 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18963023, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:21:55,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:21:55,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:21:55,452 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:21:55,452 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', 
METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf 2024-11-18T20:21:55,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:21:55,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:21:55,460 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:55,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:21:55,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:21:55,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:55,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:21:55,464 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5a964fc427ed:38299 2024-11-18T20:21:55,464 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:21:55,464 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:21:55,464 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T20:21:55,465 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,37903,1731961314859 with port=38299, startcode=1731961314990 2024-11-18T20:21:55,465 DEBUG [RS:0;5a964fc427ed:38299 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:21:55,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:21:55,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:55,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:21:55,468 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37449, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:21:55,469 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 
6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:21:55,469 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,469 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37903 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,38299,1731961314990 2024-11-18T20:21:55,469 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37903 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,38299,1731961314990 2024-11-18T20:21:55,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:55,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:21:55,472 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf 2024-11-18T20:21:55,472 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33409 2024-11-18T20:21:55,472 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:21:55,472 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:21:55,472 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:55,473 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:55,473 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:21:55,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740 
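Editor's note: the InitMetaProcedure entries above write a table descriptor for hbase:meta whose families (info, ns, rep_barrier, table) use ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching and an 8 KB or 64 KB block size. The same attributes can be expressed for an ordinary user table with the public builder API; a hedged sketch follows, with the table name and connection purely illustrative.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) throws Exception {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                      // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                   // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)   // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setInMemory(true)                                      // IN_MEMORY => 'true'
            .setBlocksize(8192)                                     // BLOCKSIZE => '8192 B (8KB)'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("exampleTable"))          // illustrative table name
            .setColumnFamily(info)
            .build();
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          admin.createTable(td);
        }
      }
    }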
2024-11-18T20:21:55,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740 2024-11-18T20:21:55,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:21:55,476 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:21:55,476 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:21:55,477 DEBUG [RS:0;5a964fc427ed:38299 {}] zookeeper.ZKUtil(111): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,38299,1731961314990 2024-11-18T20:21:55,477 WARN [RS:0;5a964fc427ed:38299 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:21:55,477 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:21:55,477 INFO [RS:0;5a964fc427ed:38299 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:21:55,477 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990 2024-11-18T20:21:55,478 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:21:55,482 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,38299,1731961314990] 2024-11-18T20:21:55,483 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:21:55,484 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772234, jitterRate=-0.018054813146591187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:21:55,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961315460Initializing all the Stores at 1731961315461 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961315461Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1731961315461Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961315461Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961315461Cleaning up temporary data from old regions at 1731961315476 (+15 ms)Region opened successfully at 1731961315484 (+8 ms) 2024-11-18T20:21:55,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:21:55,485 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:21:55,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:21:55,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:21:55,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:21:55,485 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:21:55,485 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:21:55,486 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961315485Disabling compacts and flushes for region at 1731961315485Disabling writes for close at 1731961315485Writing region close event to WAL at 1731961315485Closed at 1731961315485 2024-11-18T20:21:55,487 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:55,487 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:21:55,487 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:21:55,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:21:55,488 INFO [RS:0;5a964fc427ed:38299 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:21:55,488 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
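Editor's note: the region server above reports globalMemStoreLimit=880 M with a low-water mark of 836 M (95%), and a few entries back the meta region falls back to a 16 MB per-family flush bound because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A sketch of the settings that appear to drive those numbers; the heap-fraction key names are assumptions from memory and worth verifying.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreSizingSketch {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys: fraction of heap shared by all memstores, and the low-water fraction of that limit.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);  // 836 M of 880 M
        // Per-region flush threshold (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // The 16 MB per-family bound in the log is flush size / number of families, used because
        // hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the hbase:meta descriptor.
        return conf;
      }
    }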
2024-11-18T20:21:55,488 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:21:55,489 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:21:55,489 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:21:55,489 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,490 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:21:55,491 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,491 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:55,491 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor 
service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:21:55,491 DEBUG [RS:0;5a964fc427ed:38299 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:21:55,491 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,491 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,491 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,491 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,492 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,492 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,38299,1731961314990-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:21:55,506 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:21:55,506 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,38299,1731961314990-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,506 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:55,506 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.Replication(171): 5a964fc427ed,38299,1731961314990 started 2024-11-18T20:21:55,520 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:21:55,521 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,38299,1731961314990, RpcServer on 5a964fc427ed/172.17.0.2:38299, sessionid=0x10150ca23c40001 2024-11-18T20:21:55,521 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:21:55,521 DEBUG [RS:0;5a964fc427ed:38299 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,38299,1731961314990 2024-11-18T20:21:55,521 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,38299,1731961314990' 2024-11-18T20:21:55,521 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:21:55,522 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:21:55,522 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:21:55,522 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:21:55,522 DEBUG [RS:0;5a964fc427ed:38299 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5a964fc427ed,38299,1731961314990 2024-11-18T20:21:55,522 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,38299,1731961314990' 2024-11-18T20:21:55,522 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:21:55,523 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:21:55,523 DEBUG [RS:0;5a964fc427ed:38299 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:21:55,523 INFO [RS:0;5a964fc427ed:38299 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:21:55,523 INFO [RS:0;5a964fc427ed:38299 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
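Editor's note: the flush-table-proc and online-snapshot procedure members above watch fixed znodes under /hbase (.../acquired, .../reached, .../abort) on the quorum at 127.0.0.1:58477. For debugging, the same layout can be inspected with a plain ZooKeeper client; a small sketch, with the session timeout chosen arbitrarily.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkInspectSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; the watcher is a no-op.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58477", 30_000, event -> { });
        for (String path : new String[] {
            "/hbase/flush-table-proc/acquired",
            "/hbase/flush-table-proc/abort",
            "/hbase/online-snapshot/acquired",
            "/hbase/rs" }) {
          List<String> children = zk.getChildren(path, false);
          System.out.println(path + " -> " + children);
        }
        zk.close();
      }
    }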
2024-11-18T20:21:55,626 INFO [RS:0;5a964fc427ed:38299 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C38299%2C1731961314990, suffix=, logDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990, archiveDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs, maxLogs=32 2024-11-18T20:21:55,627 INFO [RS:0;5a964fc427ed:38299 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.1731961315627 2024-11-18T20:21:55,635 INFO [RS:0;5a964fc427ed:38299 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 2024-11-18T20:21:55,638 DEBUG [RS:0;5a964fc427ed:38299 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40299:40299),(127.0.0.1/127.0.0.1:45167:45167)] 2024-11-18T20:21:55,641 WARN [5a964fc427ed:37903 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:21:55,891 DEBUG [5a964fc427ed:37903 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:21:55,892 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5a964fc427ed,38299,1731961314990 2024-11-18T20:21:55,893 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,38299,1731961314990, state=OPENING 2024-11-18T20:21:55,926 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:21:55,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:21:55,936 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:55,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:55,938 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:21:55,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,38299,1731961314990}] 2024-11-18T20:21:56,093 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:21:56,095 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60891, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=AdminService 2024-11-18T20:21:56,100 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:21:56,100 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:21:56,103 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C38299%2C1731961314990.meta, suffix=.meta, logDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990, archiveDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs, maxLogs=32 2024-11-18T20:21:56,104 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta 2024-11-18T20:21:56,111 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta 2024-11-18T20:21:56,118 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45167:45167),(127.0.0.1/127.0.0.1:40299:40299)] 2024-11-18T20:21:56,120 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:21:56,120 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:21:56,121 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:21:56,121 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
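Editor's note: the WALs above are created by FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32, and hbase:meta gets its own .meta-suffixed WAL. A sketch of configuration that appears to correspond to those numbers; the key names (other than hbase.wal.provider) are assumptions and should be checked, and the roll size is derived as blocksize times the multiplier.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");  // FSHLogProvider, as instantiated in the log
        // Assumed keys: WAL block size and the fraction of it at which the WAL rolls (256 MB * 0.5 = 128 MB).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }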
2024-11-18T20:21:56,121 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:21:56,121 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:56,121 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:21:56,121 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:21:56,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:21:56,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:21:56,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:56,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:56,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:21:56,125 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:21:56,125 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:56,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:56,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:21:56,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:21:56,127 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:56,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:21:56,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:21:56,129 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:21:56,129 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:56,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
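Editor's note: every store opened above logs the same CompactionConfiguration: minCompactSize 128 MB, between 3 and 10 files per compaction, ratio 1.2 (5.0 off-peak) and a 2684354560-byte throttle point. These appear to map onto a handful of standard settings; a sketch under the assumption that the key names below are current for this release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);  // minCompactSize
        // Compactions larger than this throttle point go to the large-compaction thread pool.
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2_684_354_560L);
        return conf;
      }
    }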
2024-11-18T20:21:56,129 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:21:56,130 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740 2024-11-18T20:21:56,132 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740 2024-11-18T20:21:56,133 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:21:56,133 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:21:56,133 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:21:56,135 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:21:56,136 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=834698, jitterRate=0.0613742470741272}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:21:56,136 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:21:56,136 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961316121Writing region info on filesystem at 1731961316121Initializing all the Stores at 1731961316122 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961316122Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961316123 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961316123Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961316123Cleaning up temporary data from old regions at 1731961316133 (+10 ms)Running coprocessor post-open hooks at 1731961316136 (+3 ms)Region opened successfully at 1731961316136 2024-11-18T20:21:56,137 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961316093 2024-11-18T20:21:56,140 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:21:56,140 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:21:56,141 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,38299,1731961314990 2024-11-18T20:21:56,143 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,38299,1731961314990, state=OPEN 2024-11-18T20:21:56,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:21:56,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:21:56,173 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5a964fc427ed,38299,1731961314990 2024-11-18T20:21:56,174 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:56,174 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:21:56,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:21:56,178 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,38299,1731961314990 in 236 msec 2024-11-18T20:21:56,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:21:56,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 690 msec 2024-11-18T20:21:56,181 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:21:56,181 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:21:56,183 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:21:56,183 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,38299,1731961314990, seqNum=-1] 2024-11-18T20:21:56,184 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:21:56,185 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52385, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:21:56,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 782 msec 2024-11-18T20:21:56,193 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961316193, completionTime=-1 2024-11-18T20:21:56,193 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:21:56,194 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:21:56,196 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:21:56,196 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961376196 2024-11-18T20:21:56,196 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961436196 2024-11-18T20:21:56,196 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T20:21:56,197 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37903,1731961314859-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,197 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37903,1731961314859-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,197 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37903,1731961314859-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,197 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5a964fc427ed:37903, period=300000, unit=MILLISECONDS is enabled. 
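Editor's note: once meta is assigned, InitMetaProcedure creates the default and hbase namespaces and the master reports initialization complete (1.149 sec). At this point a client can confirm the namespaces through the Admin API; a minimal sketch, assuming a plain connection built from the same cluster configuration.

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceListSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Expect "default" and "hbase", matching the InitMetaProcedure entry above.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }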
2024-11-18T20:21:56,197 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,197 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,199 DEBUG [master/5a964fc427ed:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:21:56,201 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.149sec 2024-11-18T20:21:56,202 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:21:56,202 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:21:56,202 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:21:56,202 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:21:56,202 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:21:56,202 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37903,1731961314859-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:21:56,202 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37903,1731961314859-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:21:56,205 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:21:56,206 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:21:56,206 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37903,1731961314859-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:21:56,213 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21e18edd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:21:56,213 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5a964fc427ed,37903,-1 for getting cluster id 2024-11-18T20:21:56,214 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:21:56,215 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e3c65a6-53a5-4b12-9d35-9a0ec6eb6ff7' 2024-11-18T20:21:56,216 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:21:56,216 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e3c65a6-53a5-4b12-9d35-9a0ec6eb6ff7" 2024-11-18T20:21:56,216 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f5effb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:21:56,216 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5a964fc427ed,37903,-1] 2024-11-18T20:21:56,217 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:21:56,217 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:21:56,219 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43906, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:21:56,220 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@344f4078, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:21:56,220 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:21:56,221 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,38299,1731961314990, seqNum=-1] 2024-11-18T20:21:56,222 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:21:56,223 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55404, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:21:56,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5a964fc427ed,37903,1731961314859 2024-11-18T20:21:56,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:56,229 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:21:56,246 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:21:56,246 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:56,246 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:56,246 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:21:56,246 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:21:56,247 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:21:56,247 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:21:56,247 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:21:56,249 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35553 2024-11-18T20:21:56,252 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35553 connecting to ZooKeeper ensemble=127.0.0.1:58477 2024-11-18T20:21:56,253 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:56,258 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:21:56,277 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355530x0, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:21:56,277 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-18T20:21:56,277 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35553-0x10150ca23c40002 connected 2024-11-18T20:21:56,277 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-18T20:21:56,278 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:21:56,281 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-18T20:21:56,282 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:21:56,284 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:21:56,287 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35553 2024-11-18T20:21:56,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35553 2024-11-18T20:21:56,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35553 2024-11-18T20:21:56,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35553 2024-11-18T20:21:56,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35553 2024-11-18T20:21:56,294 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(746): ClusterId : 8e3c65a6-53a5-4b12-9d35-9a0ec6eb6ff7 2024-11-18T20:21:56,294 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:21:56,302 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:21:56,302 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:21:56,310 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:21:56,311 DEBUG [RS:1;5a964fc427ed:35553 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@860d097, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:21:56,323 DEBUG [RS:1;5a964fc427ed:35553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;5a964fc427ed:35553 2024-11-18T20:21:56,323 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:21:56,323 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:21:56,323 DEBUG [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T20:21:56,324 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,37903,1731961314859 with port=35553, startcode=1731961316245 2024-11-18T20:21:56,324 DEBUG [RS:1;5a964fc427ed:35553 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:21:56,326 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42525, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:21:56,327 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37903 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,35553,1731961316245 2024-11-18T20:21:56,327 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37903 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,35553,1731961316245 2024-11-18T20:21:56,329 DEBUG [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf 2024-11-18T20:21:56,329 DEBUG [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33409 2024-11-18T20:21:56,329 DEBUG [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:21:56,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:21:56,335 DEBUG [RS:1;5a964fc427ed:35553 {}] zookeeper.ZKUtil(111): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,35553,1731961316245 2024-11-18T20:21:56,335 WARN [RS:1;5a964fc427ed:35553 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:21:56,335 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,35553,1731961316245] 2024-11-18T20:21:56,335 INFO [RS:1;5a964fc427ed:35553 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:21:56,336 DEBUG [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245 2024-11-18T20:21:56,339 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:21:56,341 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:21:56,341 INFO [RS:1;5a964fc427ed:35553 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:21:56,342 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T20:21:56,342 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:21:56,343 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:21:56,343 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,343 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,343 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,343 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,343 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,343 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:21:56,344 DEBUG [RS:1;5a964fc427ed:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:21:56,346 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T20:21:56,346 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,346 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,346 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,346 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,346 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,35553,1731961316245-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:21:56,363 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:21:56,363 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,35553,1731961316245-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,363 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,364 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.Replication(171): 5a964fc427ed,35553,1731961316245 started 2024-11-18T20:21:56,379 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:21:56,379 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,35553,1731961316245, RpcServer on 5a964fc427ed/172.17.0.2:35553, sessionid=0x10150ca23c40002 2024-11-18T20:21:56,379 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:21:56,379 DEBUG [RS:1;5a964fc427ed:35553 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,35553,1731961316245 2024-11-18T20:21:56,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;5a964fc427ed:35553,5,FailOnTimeoutGroup] 2024-11-18T20:21:56,379 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,35553,1731961316245' 2024-11-18T20:21:56,379 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:21:56,380 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-18T20:21:56,380 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:21:56,380 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:21:56,381 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:21:56,381 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:21:56,381 DEBUG [RS:1;5a964fc427ed:35553 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
5a964fc427ed,35553,1731961316245 2024-11-18T20:21:56,381 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,35553,1731961316245' 2024-11-18T20:21:56,381 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:21:56,382 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 5a964fc427ed,37903,1731961314859 2024-11-18T20:21:56,382 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3410d2ef 2024-11-18T20:21:56,382 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:21:56,382 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:21:56,384 DEBUG [RS:1;5a964fc427ed:35553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:21:56,384 INFO [RS:1;5a964fc427ed:35553 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:21:56,384 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43914, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:21:56,384 INFO [RS:1;5a964fc427ed:35553 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:21:56,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37903 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:21:56,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37903 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T20:21:56,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37903 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:21:56,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37903 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:21:56,389 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:21:56,390 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:56,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37903 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-18T20:21:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37903 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:21:56,391 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:21:56,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741835_1011 (size=393) 2024-11-18T20:21:56,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741835_1011 (size=393) 2024-11-18T20:21:56,404 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ef2268c469d4746ed529b52e9f45c475, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf 2024-11-18T20:21:56,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741836_1012 (size=76) 2024-11-18T20:21:56,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41917 is added to blk_1073741836_1012 (size=76) 2024-11-18T20:21:56,418 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:56,418 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing ef2268c469d4746ed529b52e9f45c475, disabling compactions & flushes 2024-11-18T20:21:56,418 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:21:56,418 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:21:56,418 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. after waiting 0 ms 2024-11-18T20:21:56,418 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:21:56,418 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:21:56,418 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for ef2268c469d4746ed529b52e9f45c475: Waiting for close lock at 1731961316418Disabling compacts and flushes for region at 1731961316418Disabling writes for close at 1731961316418Writing region close event to WAL at 1731961316418Closed at 1731961316418 2024-11-18T20:21:56,420 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:21:56,421 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731961316420"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961316420"}]},"ts":"1731961316420"} 2024-11-18T20:21:56,424 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T20:21:56,426 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:21:56,426 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961316426"}]},"ts":"1731961316426"} 2024-11-18T20:21:56,429 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-18T20:21:56,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ef2268c469d4746ed529b52e9f45c475, ASSIGN}] 2024-11-18T20:21:56,432 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ef2268c469d4746ed529b52e9f45c475, ASSIGN 2024-11-18T20:21:56,434 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ef2268c469d4746ed529b52e9f45c475, ASSIGN; state=OFFLINE, location=5a964fc427ed,38299,1731961314990; forceNewPlan=false, retain=false 2024-11-18T20:21:56,487 INFO [RS:1;5a964fc427ed:35553 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C35553%2C1731961316245, suffix=, logDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245, archiveDir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs, maxLogs=32 2024-11-18T20:21:56,488 INFO [RS:1;5a964fc427ed:35553 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C35553%2C1731961316245.1731961316487 2024-11-18T20:21:56,499 INFO [RS:1;5a964fc427ed:35553 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 2024-11-18T20:21:56,501 DEBUG [RS:1;5a964fc427ed:35553 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45167:45167),(127.0.0.1/127.0.0.1:40299:40299)] 2024-11-18T20:21:56,585 INFO [5a964fc427ed:37903 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-18T20:21:56,585 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ef2268c469d4746ed529b52e9f45c475, regionState=OPENING, regionLocation=5a964fc427ed,38299,1731961314990 2024-11-18T20:21:56,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ef2268c469d4746ed529b52e9f45c475, ASSIGN because future has completed 2024-11-18T20:21:56,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef2268c469d4746ed529b52e9f45c475, server=5a964fc427ed,38299,1731961314990}] 2024-11-18T20:21:56,750 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:21:56,750 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ef2268c469d4746ed529b52e9f45c475, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:21:56,751 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,751 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:21:56,751 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,751 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,758 INFO [StoreOpener-ef2268c469d4746ed529b52e9f45c475-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,760 INFO [StoreOpener-ef2268c469d4746ed529b52e9f45c475-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef2268c469d4746ed529b52e9f45c475 columnFamilyName info 2024-11-18T20:21:56,760 DEBUG [StoreOpener-ef2268c469d4746ed529b52e9f45c475-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:21:56,761 INFO [StoreOpener-ef2268c469d4746ed529b52e9f45c475-1 {}] regionserver.HStore(327): Store=ef2268c469d4746ed529b52e9f45c475/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:21:56,761 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,762 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,763 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,763 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,763 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,765 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,769 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:21:56,770 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ef2268c469d4746ed529b52e9f45c475; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702354, jitterRate=-0.1069110631942749}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:21:56,770 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:21:56,771 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ef2268c469d4746ed529b52e9f45c475: Running coprocessor pre-open hook at 1731961316752Writing region info on filesystem at 1731961316752Initializing all the Stores at 1731961316757 (+5 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961316757Cleaning up temporary data from old regions at 1731961316763 (+6 ms)Running coprocessor post-open hooks at 1731961316770 (+7 ms)Region opened successfully at 1731961316771 (+1 ms) 2024-11-18T20:21:56,773 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475., pid=6, masterSystemTime=1731961316744 2024-11-18T20:21:56,776 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:21:56,776 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:21:56,777 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ef2268c469d4746ed529b52e9f45c475, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,38299,1731961314990 2024-11-18T20:21:56,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef2268c469d4746ed529b52e9f45c475, server=5a964fc427ed,38299,1731961314990 because future has completed 2024-11-18T20:21:56,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:21:56,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ef2268c469d4746ed529b52e9f45c475, server=5a964fc427ed,38299,1731961314990 in 195 msec 2024-11-18T20:21:56,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:21:56,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ef2268c469d4746ed529b52e9f45c475, ASSIGN in 359 msec 2024-11-18T20:21:56,794 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:21:56,795 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961316794"}]},"ts":"1731961316794"} 2024-11-18T20:21:56,797 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-18T20:21:56,799 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:21:56,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 413 msec 2024-11-18T20:21:56,926 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:21:56,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:21:56,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:21:56,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:21:56,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:01,486 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-18T20:22:02,255 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:22:02,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:02,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:02,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:02,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:05,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:22:05,393 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T20:22:05,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:22:05,394 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-18T20:22:05,395 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:22:05,395 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T20:22:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37903 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:22:06,405 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-18T20:22:06,405 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-18T20:22:06,410 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:22:06,410 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:22:06,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:06,432 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:06,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:06,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:06,434 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:22:06,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cac6b83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:06,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5dc4ef73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:06,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65429201{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir/jetty-localhost-45751-hadoop-hdfs-3_4_1-tests_jar-_-any-11121126897157480377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:06,533 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4cbb1003{HTTP/1.1, (http/1.1)}{localhost:45751} 2024-11-18T20:22:06,533 INFO [Time-limited test {}] server.Server(415): Started @118024ms 2024-11-18T20:22:06,535 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:22:06,573 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:06,577 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:06,578 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:06,578 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:06,578 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:22:06,579 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3eb2cdd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:06,580 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bea65f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:06,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70a0068f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir/jetty-localhost-38937-hadoop-hdfs-3_4_1-tests_jar-_-any-9811497694390035519/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:06,679 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39615ad8{HTTP/1.1, (http/1.1)}{localhost:38937} 2024-11-18T20:22:06,679 INFO [Time-limited test {}] server.Server(415): Started @118171ms 2024-11-18T20:22:06,681 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:22:06,722 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:06,725 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:06,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:06,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:06,726 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:22:06,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@264a677b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:06,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@158a9d8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:06,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ec5aa68{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir/jetty-localhost-41115-hadoop-hdfs-3_4_1-tests_jar-_-any-15560287015714297070/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:06,840 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@393eb51f{HTTP/1.1, (http/1.1)}{localhost:41115} 2024-11-18T20:22:06,840 INFO [Time-limited test {}] server.Server(415): Started @118331ms 2024-11-18T20:22:06,841 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:22:07,195 WARN [Thread-860 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:07,195 WARN [Thread-859 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:07,229 WARN [Thread-802 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:22:07,233 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x445624f123bbeeb0 with lease ID 0x3ead2f9b91647e90: Processing first storage report for DS-d0680cf5-7a70-4b30-83db-7921f3439c93 from datanode DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:22:07,233 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x445624f123bbeeb0 with lease ID 0x3ead2f9b91647e90: from storage DS-d0680cf5-7a70-4b30-83db-7921f3439c93 node DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:07,233 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x445624f123bbeeb0 with lease ID 0x3ead2f9b91647e90: Processing first storage report for DS-f1eaee3d-5840-4815-b54d-f0b21290c1c9 from datanode DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:22:07,233 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x445624f123bbeeb0 with lease ID 0x3ead2f9b91647e90: from storage DS-f1eaee3d-5840-4815-b54d-f0b21290c1c9 node DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:07,372 WARN [Thread-873 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data8/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:07,372 WARN [Thread-872 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data7/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:07,400 WARN [Thread-824 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:22:07,403 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68fffbdd9c342a9f with lease ID 0x3ead2f9b91647e91: Processing first storage report for DS-d0c04b81-74a9-4beb-838d-f4120bdc9626 from datanode DatanodeRegistration(127.0.0.1:36939, datanodeUuid=e390d4ff-d603-4b31-9651-1aad4b52f898, infoPort=34449, infoSecurePort=0, ipcPort=36153, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:22:07,404 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68fffbdd9c342a9f with lease ID 0x3ead2f9b91647e91: from storage DS-d0c04b81-74a9-4beb-838d-f4120bdc9626 node DatanodeRegistration(127.0.0.1:36939, datanodeUuid=e390d4ff-d603-4b31-9651-1aad4b52f898, infoPort=34449, infoSecurePort=0, ipcPort=36153, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:07,404 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68fffbdd9c342a9f with lease ID 0x3ead2f9b91647e91: Processing first storage report for DS-1c96cc68-9a58-4adb-866b-08f42792e171 from datanode DatanodeRegistration(127.0.0.1:36939, datanodeUuid=e390d4ff-d603-4b31-9651-1aad4b52f898, infoPort=34449, infoSecurePort=0, ipcPort=36153, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:22:07,404 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68fffbdd9c342a9f with lease ID 0x3ead2f9b91647e91: from storage DS-1c96cc68-9a58-4adb-866b-08f42792e171 node DatanodeRegistration(127.0.0.1:36939, datanodeUuid=e390d4ff-d603-4b31-9651-1aad4b52f898, infoPort=34449, infoSecurePort=0, ipcPort=36153, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:07,505 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data9/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:07,505 WARN [Thread-884 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data10/current/BP-1540393714-172.17.0.2-1731961313479/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:07,524 WARN [Thread-847 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:22:07,527 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcdf15a77b05d5a3b with lease ID 0x3ead2f9b91647e92: Processing first storage report for DS-fd8e96a5-f259-40da-bba6-470f04c9a02a from datanode DatanodeRegistration(127.0.0.1:35323, datanodeUuid=7e79b95f-a97a-4837-9097-54ac82a00e7b, infoPort=46185, infoSecurePort=0, ipcPort=39707, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:22:07,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcdf15a77b05d5a3b with lease ID 0x3ead2f9b91647e92: from storage DS-fd8e96a5-f259-40da-bba6-470f04c9a02a node DatanodeRegistration(127.0.0.1:35323, datanodeUuid=7e79b95f-a97a-4837-9097-54ac82a00e7b, infoPort=46185, infoSecurePort=0, ipcPort=39707, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:07,527 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcdf15a77b05d5a3b with lease ID 0x3ead2f9b91647e92: Processing first storage report for DS-6736d15f-2aec-49f2-92db-fec668539e11 from datanode DatanodeRegistration(127.0.0.1:35323, datanodeUuid=7e79b95f-a97a-4837-9097-54ac82a00e7b, infoPort=46185, infoSecurePort=0, ipcPort=39707, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479) 2024-11-18T20:22:07,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcdf15a77b05d5a3b with lease ID 0x3ead2f9b91647e92: from storage DS-6736d15f-2aec-49f2-92db-fec668539e11 node DatanodeRegistration(127.0.0.1:35323, datanodeUuid=7e79b95f-a97a-4837-9097-54ac82a00e7b, infoPort=46185, infoSecurePort=0, ipcPort=39707, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:07,575 WARN [ResponseProcessor for block BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,575 WARN [ResponseProcessor for block BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:07,575 WARN [ResponseProcessor for block BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,576 WARN [ResponseProcessor for block BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,577 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:07,577 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:07,577 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta block BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 
2024-11-18T20:22:07,578 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:07,577 WARN [PacketResponder: BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36171] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:49088 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49088 dst: /127.0.0.1:41917 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:49104 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49104 dst: /127.0.0.1:41917 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1795605375_22 at /127.0.0.1:40840 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36171:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40840 dst: /127.0.0.1:36171 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:40800 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36171:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40800 dst: /127.0.0.1:36171 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_986252936_22 at /127.0.0.1:40752 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36171:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40752 dst: /127.0.0.1:36171 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_986252936_22 at /127.0.0.1:49052 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49052 dst: /127.0.0.1:41917 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1795605375_22 at /127.0.0.1:49130 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49130 dst: /127.0.0.1:41917 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:40788 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36171:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40788 dst: /127.0.0.1:36171 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6cd7b3e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:07,583 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ff95875{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:07,583 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:07,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32403ac6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:07,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f7f19bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:07,585 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:07,585 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:22:07,585 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1540393714-172.17.0.2-1731961313479 (Datanode Uuid 668a6dfd-9553-4de0-a7a1-42a381a2d7bc) service to localhost/127.0.0.1:33409 2024-11-18T20:22:07,585 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:07,586 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data3/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:07,586 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data4/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:07,586 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:07,587 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta block BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:07,589 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,592 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:07,592 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@7f5bb144 {}] datanode.DataXceiver(331): 127.0.0.1:41917:DataXceiver error processing unknown operation src: /127.0.0.1:60862 dst: /127.0.0.1:41917 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:07,592 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:07,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c77eea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:07,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e20426d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:07,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:07,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf32f74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:07,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cb9bebc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:07,594 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:07,594 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:22:07,594 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1540393714-172.17.0.2-1731961313479 (Datanode Uuid 37be7c4f-1f71-471c-8a95-e3131df48058) service to localhost/127.0.0.1:33409 2024-11-18T20:22:07,594 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:07,595 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data1/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:07,595 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data2/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:07,595 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:07,599 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475., hostname=5a964fc427ed,38299,1731961314990, seqNum=2] 2024-11-18T20:22:07,600 ERROR [FSHLog-0-hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf-prefix:5a964fc427ed,38299,1731961314990 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,600 WARN [FSHLog-0-hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf-prefix:5a964fc427ed,38299,1731961314990 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,601 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C38299%2C1731961314990:(num 1731961315627) roll requested 2024-11-18T20:22:07,601 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.1731961327601 2024-11-18T20:22:07,603 WARN [Thread-895 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,604 WARN [Thread-895 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 
2024-11-18T20:22:07,604 WARN [Thread-895 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741838_1018 2024-11-18T20:22:07,606 WARN [Thread-895 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:07,609 WARN [Thread-895 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1019 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,610 WARN [Thread-895 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:07,610 WARN [Thread-895 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741839_1019 2024-11-18T20:22:07,610 WARN [Thread-895 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:07,614 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:07,614 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:07,615 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:07,615 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:07,615 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:07,615 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961327601 2024-11-18T20:22:07,615 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,616 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:07,616 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46185:46185),(127.0.0.1/127.0.0.1:34449:34449)] 2024-11-18T20:22:07,616 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 is not closed yet, will try archiving it next time 2024-11-18T20:22:07,617 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-18T20:22:07,617 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-18T20:22:07,617 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 2024-11-18T20:22:07,620 WARN [IPC Server handler 0 on default port 33409 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741833_1009 2024-11-18T20:22:07,624 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 after 5ms 2024-11-18T20:22:08,345 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:09,133 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:09,616 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:09,618 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961327601 2024-11-18T20:22:09,620 WARN [ResponseProcessor for block BP-1540393714-172.17.0.2-1731961313479:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1540393714-172.17.0.2-1731961313479:blk_1073741840_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:09,621 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961327601 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:09,622 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:35720 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:35323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35720 dst: /127.0.0.1:35323 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:09,622 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:56574 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:36939:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56574 dst: /127.0.0.1:36939 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:09,655 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ec5aa68{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:09,656 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@393eb51f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:09,656 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:09,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@158a9d8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:09,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@264a677b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:09,658 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:22:09,658 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:09,658 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:09,658 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1540393714-172.17.0.2-1731961313479 (Datanode Uuid 7e79b95f-a97a-4837-9097-54ac82a00e7b) service to localhost/127.0.0.1:33409 2024-11-18T20:22:09,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data9/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:09,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data10/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:09,660 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:10,346 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:11,134 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:11,617 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:11,618 WARN [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]] 2024-11-18T20:22:11,618 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C38299%2C1731961314990:(num 1731961327601) roll requested 2024-11-18T20:22:11,619 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.1731961331618 2024-11-18T20:22:11,624 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:11,625 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 
2024-11-18T20:22:11,625 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741841_1023 2024-11-18T20:22:11,626 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 after 4008ms 2024-11-18T20:22:11,626 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:11,628 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:11,629 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 
2024-11-18T20:22:11,629 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741842_1024 2024-11-18T20:22:11,630 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:11,635 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:11,636 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:11,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:11,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:11,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:11,636 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961327601 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961331618 2024-11-18T20:22:11,637 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41125:41125),(127.0.0.1/127.0.0.1:34449:34449)] 2024-11-18T20:22:11,637 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 is not closed yet, will try archiving it next time 2024-11-18T20:22:11,637 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961327601 is not closed yet, will try archiving it next time 2024-11-18T20:22:11,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36939 is added to blk_1073741840_1022 (size=2431) 2024-11-18T20:22:11,667 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T20:22:12,040 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 is not closed yet, will try archiving it next time 2024-11-18T20:22:12,346 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:12,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741840_1022 (size=2431) 2024-11-18T20:22:13,135 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:13,637 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:13,673 WARN [ResponseProcessor for block BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:13,673 WARN [DataStreamer for file /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961331618 block BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:13,674 WARN [PacketResponder: BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36939] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:13,674 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:40778 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40778 dst: /127.0.0.1:40087 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:13,675 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:56590 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:36939:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56590 dst: /127.0.0.1:36939 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:13,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70a0068f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:13,677 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39615ad8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:13,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:13,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bea65f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:13,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3eb2cdd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:13,679 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:13,679 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:22:13,680 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1540393714-172.17.0.2-1731961313479 (Datanode Uuid e390d4ff-d603-4b31-9651-1aad4b52f898) service to localhost/127.0.0.1:33409 2024-11-18T20:22:13,680 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:13,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data7/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:13,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data8/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:13,680 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38299 {}] regionserver.HRegion(8855): Flush requested on ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:13,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef2268c469d4746ed529b52e9f45c475 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:22:13,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/e440d3a165ae4935bb85bdd56323f7ca is 1080, key is row0002/info:/1731961329662/Put/seqid=0 2024-11-18T20:22:13,716 WARN [Thread-915 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:13,716 WARN [Thread-915 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:13,716 WARN [Thread-915 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741844_1027 2024-11-18T20:22:13,717 WARN [Thread-915 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:13,719 WARN [Thread-915 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:13,719 WARN [Thread-915 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 
2024-11-18T20:22:13,719 WARN [Thread-915 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741845_1028 2024-11-18T20:22:13,720 WARN [Thread-915 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:13,723 WARN [Thread-915 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36939 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:13,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:40796 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741846_1029 to mirror 127.0.0.1:36939 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:13,723 WARN [Thread-915 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 
2024-11-18T20:22:13,723 WARN [Thread-915 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741846_1029 2024-11-18T20:22:13,723 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:40796 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:13,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:40796 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40796 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:13,724 WARN [Thread-915 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:13,725 WARN [Thread-915 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:13,725 WARN [Thread-915 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 
2024-11-18T20:22:13,725 WARN [Thread-915 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741847_1030 2024-11-18T20:22:13,726 WARN [Thread-915 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:13,727 WARN [IPC Server handler 1 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:13,727 WARN [IPC Server handler 1 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:13,727 WARN [IPC Server handler 1 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:13,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741848_1031 (size=10347) 2024-11-18T20:22:14,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/e440d3a165ae4935bb85bdd56323f7ca 2024-11-18T20:22:14,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/e440d3a165ae4935bb85bdd56323f7ca as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e440d3a165ae4935bb85bdd56323f7ca 2024-11-18T20:22:14,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e440d3a165ae4935bb85bdd56323f7ca, entries=5, sequenceid=11, filesize=10.1 K 2024-11-18T20:22:14,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for ef2268c469d4746ed529b52e9f45c475 in 460ms, sequenceid=11, compaction requested=false 2024-11-18T20:22:14,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:14,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38299 {}] regionserver.HRegion(8855): Flush requested on ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:14,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef2268c469d4746ed529b52e9f45c475 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-18T20:22:14,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/3e16694c77794964a18fc9174056bbe5 is 1080, key is row0007/info:/1731961333693/Put/seqid=0 2024-11-18T20:22:14,337 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:14,337 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:14,337 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741849_1032 2024-11-18T20:22:14,338 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:14,339 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:14,340 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 2024-11-18T20:22:14,340 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741850_1033 2024-11-18T20:22:14,340 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:14,342 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:14,342 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:14,342 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741851_1034 2024-11-18T20:22:14,343 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:14,345 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36939 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:14,345 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54542 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741852_1035 to mirror 127.0.0.1:36939 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:14,345 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:14,345 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741852_1035 2024-11-18T20:22:14,345 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54542 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:14,345 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54542 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54542 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:14,346 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:14,346 WARN [IPC Server handler 0 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:14,346 WARN [IPC Server handler 0 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:14,346 WARN [IPC Server handler 0 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:14,347 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:14,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741853_1036 (size=12506) 2024-11-18T20:22:14,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/3e16694c77794964a18fc9174056bbe5 2024-11-18T20:22:14,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/3e16694c77794964a18fc9174056bbe5 as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5 2024-11-18T20:22:14,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5, entries=7, sequenceid=24, filesize=12.2 K 2024-11-18T20:22:14,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for ef2268c469d4746ed529b52e9f45c475 in 442ms, sequenceid=24, compaction requested=false 2024-11-18T20:22:14,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:14,768 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-18T20:22:14,768 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:14,768 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5 because midkey is the same as first or last row 2024-11-18T20:22:15,135 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:15,237 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6dbd1900[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741853_1036 to 127.0.0.1:36171 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:15,237 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@637afb1f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741848_1031 to 127.0.0.1:41917 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:15,638 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:15,638 WARN [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]] 2024-11-18T20:22:15,638 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C38299%2C1731961314990:(num 1731961331618) roll requested 2024-11-18T20:22:15,639 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.1731961335638 2024-11-18T20:22:15,641 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:15,641 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:15,642 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741854_1037 2024-11-18T20:22:15,642 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:15,644 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41917 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:15,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54548 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741855_1038 to mirror 127.0.0.1:41917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:15,645 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 2024-11-18T20:22:15,645 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54548 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T20:22:15,645 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741855_1038 2024-11-18T20:22:15,645 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54548 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54548 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:15,646 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:15,647 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:15,647 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:15,647 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741856_1039 2024-11-18T20:22:15,648 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:15,650 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:15,650 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:15,650 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741857_1040 2024-11-18T20:22:15,651 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:15,651 WARN [IPC Server handler 4 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:15,651 WARN [IPC Server handler 4 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:15,651 WARN [IPC Server handler 4 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:15,654 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:15,654 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:15,654 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:15,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:15,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:15,655 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961331618 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961335638 2024-11-18T20:22:15,655 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41125:41125)] 2024-11-18T20:22:15,656 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 is not closed yet, will try archiving it next time 2024-11-18T20:22:15,656 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961331618 is not closed yet, will try archiving it next time 2024-11-18T20:22:15,656 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961327601 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs/5a964fc427ed%2C38299%2C1731961314990.1731961327601 2024-11-18T20:22:15,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741843_1026 (size=25992) 2024-11-18T20:22:15,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38299 {}] regionserver.HRegion(8855): Flush requested on ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:15,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef2268c469d4746ed529b52e9f45c475 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:22:15,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd is 1079, key is tmprow/info:/1731961335749/Put/seqid=0 2024-11-18T20:22:15,760 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:15,760 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 
2024-11-18T20:22:15,760 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741859_1042 2024-11-18T20:22:15,761 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:15,762 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:15,762 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:15,762 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741860_1043 2024-11-18T20:22:15,762 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:15,765 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36171 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:15,765 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54570 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741861_1044 to mirror 127.0.0.1:36171 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:15,765 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:15,765 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741861_1044 2024-11-18T20:22:15,765 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54570 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:15,765 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54570 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54570 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:15,765 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:15,768 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41917 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:15,768 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54572 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741862_1045 to mirror 127.0.0.1:41917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:15,768 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 2024-11-18T20:22:15,768 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741862_1045 2024-11-18T20:22:15,768 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54572 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:15,768 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54572 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54572 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:15,769 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:15,770 WARN [IPC Server handler 3 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:15,770 WARN [IPC Server handler 3 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:15,770 WARN [IPC Server handler 3 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:15,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741863_1046 (size=6027) 2024-11-18T20:22:16,059 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 is not closed yet, will try archiving it next time 2024-11-18T20:22:16,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd 2024-11-18T20:22:16,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd 2024-11-18T20:22:16,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd, entries=1, sequenceid=34, filesize=5.9 K 2024-11-18T20:22:16,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ef2268c469d4746ed529b52e9f45c475 in 439ms, 
sequenceid=34, compaction requested=true 2024-11-18T20:22:16,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:16,190 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-18T20:22:16,190 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5 because midkey is the same as first or last row 2024-11-18T20:22:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef2268c469d4746ed529b52e9f45c475:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:22:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:22:16,191 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:22:16,192 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:22:16,193 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HStore(1541): ef2268c469d4746ed529b52e9f45c475/info is initiating minor compaction (all files) 2024-11-18T20:22:16,193 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef2268c469d4746ed529b52e9f45c475/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 
2024-11-18T20:22:16,193 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e440d3a165ae4935bb85bdd56323f7ca, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd] into tmpdir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp, totalSize=28.2 K 2024-11-18T20:22:16,193 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.Compactor(225): Compacting e440d3a165ae4935bb85bdd56323f7ca, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731961329662 2024-11-18T20:22:16,194 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3e16694c77794964a18fc9174056bbe5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731961333693 2024-11-18T20:22:16,194 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6e1a6a389f94150bc3a2a7ffcfbb9dd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731961335749 2024-11-18T20:22:16,213 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef2268c469d4746ed529b52e9f45c475#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:22:16,213 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/3c97f5bdb74e4009bb24c2c3c98b703c is 1080, key is row0002/info:/1731961329662/Put/seqid=0 2024-11-18T20:22:16,216 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:16,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54592 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741864_1047 to mirror 127.0.0.1:35323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:16,217 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:16,217 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741864_1047 2024-11-18T20:22:16,217 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54592 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:16,217 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54592 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54592 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:16,217 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:16,218 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:16,219 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:16,219 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741865_1048 2024-11-18T20:22:16,219 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:16,221 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41917 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:16,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54598 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741866_1049 to mirror 127.0.0.1:41917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:16,221 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 2024-11-18T20:22:16,221 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741866_1049 2024-11-18T20:22:16,221 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54598 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:16,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54598 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54598 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:16,222 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:16,224 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36171 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:16,224 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54602 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741867_1050 to mirror 127.0.0.1:36171 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:16,224 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:16,224 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741867_1050 2024-11-18T20:22:16,224 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54602 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:16,224 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54602 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54602 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:16,225 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:16,226 WARN [IPC Server handler 0 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:16,226 WARN [IPC Server handler 0 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:16,226 WARN [IPC Server handler 0 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:16,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741868_1051 (size=17994) 2024-11-18T20:22:16,347 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:16,639 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/3c97f5bdb74e4009bb24c2c3c98b703c as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c 2024-11-18T20:22:16,646 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef2268c469d4746ed529b52e9f45c475/info of ef2268c469d4746ed529b52e9f45c475 into 3c97f5bdb74e4009bb24c2c3c98b703c(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:22:16,646 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:16,646 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475., storeName=ef2268c469d4746ed529b52e9f45c475/info, priority=13, startTime=1731961336191; duration=0sec 2024-11-18T20:22:16,646 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T20:22:16,646 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c because midkey is the same as first or last row 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c because midkey is the same as first or last row 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c because midkey is the same as first or last row 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:22:16,647 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef2268c469d4746ed529b52e9f45c475:info 2024-11-18T20:22:17,136 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38299 {}] regionserver.HRegion(8855): Flush requested on ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:17,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef2268c469d4746ed529b52e9f45c475 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:22:17,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/c64d931685614ed9b7138f12a1a7cd63 is 1079, key is tmprow/info:/1731961337174/Put/seqid=0 2024-11-18T20:22:17,190 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,191 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:17,191 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741869_1052 2024-11-18T20:22:17,192 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:17,200 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,201 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:17,201 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741870_1053 2024-11-18T20:22:17,202 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:17,207 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,207 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:17,208 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741871_1054 2024-11-18T20:22:17,211 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:17,220 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41917 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,220 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54618 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741872_1055 to mirror 127.0.0.1:41917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:17,220 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 2024-11-18T20:22:17,220 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741872_1055 2024-11-18T20:22:17,220 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54618 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:17,220 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54618 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54618 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:17,221 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:17,222 WARN [IPC Server handler 2 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:17,223 WARN [IPC Server handler 2 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:17,225 WARN [IPC Server handler 2 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:17,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741873_1056 (size=6027) 2024-11-18T20:22:17,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/c64d931685614ed9b7138f12a1a7cd63 2024-11-18T20:22:17,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/c64d931685614ed9b7138f12a1a7cd63 as 
hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/c64d931685614ed9b7138f12a1a7cd63 2024-11-18T20:22:17,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/c64d931685614ed9b7138f12a1a7cd63, entries=1, sequenceid=45, filesize=5.9 K 2024-11-18T20:22:17,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ef2268c469d4746ed529b52e9f45c475 in 480ms, sequenceid=45, compaction requested=false 2024-11-18T20:22:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-18T20:22:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:17,656 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c because midkey is the same as first or last row 2024-11-18T20:22:17,656 WARN [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]] 2024-11-18T20:22:17,656 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C38299%2C1731961314990:(num 1731961335638) roll requested 2024-11-18T20:22:17,657 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.1731961337656 2024-11-18T20:22:17,660 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,660 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:17,660 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741874_1057 2024-11-18T20:22:17,661 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:17,663 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,663 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 2024-11-18T20:22:17,663 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741875_1058 2024-11-18T20:22:17,664 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:17,665 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,665 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:17,666 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741876_1059 2024-11-18T20:22:17,666 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:17,668 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:17,668 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 
2024-11-18T20:22:17,668 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741877_1060 2024-11-18T20:22:17,669 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:17,669 WARN [IPC Server handler 2 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:17,669 WARN [IPC Server handler 2 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:17,669 WARN [IPC Server handler 2 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:17,673 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:17,673 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:17,674 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:17,674 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:17,674 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:17,674 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961335638 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961337656 2024-11-18T20:22:17,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741858_1041 (size=13591) 2024-11-18T20:22:17,686 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41125:41125)] 2024-11-18T20:22:17,686 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 is not closed yet, will try archiving it next time 2024-11-18T20:22:17,686 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961335638 is not closed yet, 
will try archiving it next time 2024-11-18T20:22:17,686 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961331618 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs/5a964fc427ed%2C38299%2C1731961314990.1731961331618 2024-11-18T20:22:18,077 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 is not closed yet, will try archiving it next time 2024-11-18T20:22:18,238 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6dbd1900[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741863_1046 to 127.0.0.1:41917 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:18,238 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@637afb1f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741843_1026 to 127.0.0.1:35323 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:18,348 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:18,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38299 {}] regionserver.HRegion(8855): Flush requested on ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:18,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef2268c469d4746ed529b52e9f45c475 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:22:18,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/781eb742df2f4ff285b6fafd86862768 is 1079, key is tmprow/info:/1731961338597/Put/seqid=0 2024-11-18T20:22:18,605 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:18,605 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 
2024-11-18T20:22:18,605 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741879_1062 2024-11-18T20:22:18,605 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:18,606 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:18,607 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:18,607 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741880_1063 2024-11-18T20:22:18,607 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:18,609 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:18,609 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54640 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741881_1064 to mirror 127.0.0.1:35323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:18,609 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:18,609 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54640 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:18,609 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741881_1064 2024-11-18T20:22:18,609 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54640 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54640 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:18,610 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:18,611 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:18,612 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 
2024-11-18T20:22:18,612 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741882_1065 2024-11-18T20:22:18,612 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:18,613 WARN [IPC Server handler 4 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:18,613 WARN [IPC Server handler 4 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:18,613 WARN [IPC Server handler 4 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:18,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741883_1066 (size=6027) 2024-11-18T20:22:19,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/781eb742df2f4ff285b6fafd86862768 2024-11-18T20:22:19,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/781eb742df2f4ff285b6fafd86862768 as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/781eb742df2f4ff285b6fafd86862768 2024-11-18T20:22:19,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/781eb742df2f4ff285b6fafd86862768, entries=1, sequenceid=55, filesize=5.9 K 2024-11-18T20:22:19,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ef2268c469d4746ed529b52e9f45c475 in 435ms, sequenceid=55, compaction requested=true 2024-11-18T20:22:19,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:19,034 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-18T20:22:19,034 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:19,034 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c because midkey is the same as first or last row 2024-11-18T20:22:19,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef2268c469d4746ed529b52e9f45c475:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:22:19,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:22:19,034 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:22:19,035 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:22:19,036 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HStore(1541): ef2268c469d4746ed529b52e9f45c475/info is initiating minor compaction (all files) 2024-11-18T20:22:19,036 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef2268c469d4746ed529b52e9f45c475/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 
2024-11-18T20:22:19,036 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/c64d931685614ed9b7138f12a1a7cd63, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/781eb742df2f4ff285b6fafd86862768] into tmpdir=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp, totalSize=29.3 K 2024-11-18T20:22:19,037 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c97f5bdb74e4009bb24c2c3c98b703c, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731961329662 2024-11-18T20:22:19,037 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.Compactor(225): Compacting c64d931685614ed9b7138f12a1a7cd63, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731961337174 2024-11-18T20:22:19,037 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] compactions.Compactor(225): Compacting 781eb742df2f4ff285b6fafd86862768, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731961338597 2024-11-18T20:22:19,056 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef2268c469d4746ed529b52e9f45c475#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:22:19,057 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/1e5b78d56cf740b0be7b44158d0a0652 is 1080, key is row0002/info:/1731961329662/Put/seqid=0 2024-11-18T20:22:19,059 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:19,060 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]) is bad. 2024-11-18T20:22:19,060 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741884_1067 2024-11-18T20:22:19,060 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK] 2024-11-18T20:22:19,062 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:19,062 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:19,062 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741885_1068 2024-11-18T20:22:19,062 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:19,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54654 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741886_1069 to mirror 127.0.0.1:36171 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:19,065 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36171 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:19,065 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54654 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:19,065 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]) is bad. 2024-11-18T20:22:19,065 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741886_1069 2024-11-18T20:22:19,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54654 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54654 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:19,066 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36171,DS-752959bd-3814-4dce-8848-a7add3965661,DISK] 2024-11-18T20:22:19,072 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:19,072 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54658 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741887_1070 to mirror 127.0.0.1:35323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:19,072 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:19,072 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741887_1070 2024-11-18T20:22:19,072 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54658 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:19,072 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:54658 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54658 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:19,073 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:19,074 WARN [IPC Server handler 3 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:22:19,074 WARN [IPC Server handler 3 on default port 33409 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:22:19,074 WARN [IPC Server handler 3 on default port 33409 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:22:19,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741888_1071 (size=18097) 2024-11-18T20:22:19,090 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/1e5b78d56cf740b0be7b44158d0a0652 as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/1e5b78d56cf740b0be7b44158d0a0652 2024-11-18T20:22:19,099 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef2268c469d4746ed529b52e9f45c475/info of ef2268c469d4746ed529b52e9f45c475 into 1e5b78d56cf740b0be7b44158d0a0652(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:19,099 INFO [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475., storeName=ef2268c469d4746ed529b52e9f45c475/info, priority=13, startTime=1731961339034; duration=0sec 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/1e5b78d56cf740b0be7b44158d0a0652 because midkey is the same as first or last row 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/1e5b78d56cf740b0be7b44158d0a0652 because midkey is the same as first or last row 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/1e5b78d56cf740b0be7b44158d0a0652 because midkey is the same as first or last row 2024-11-18T20:22:19,099 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:22:19,100 DEBUG [RS:0;5a964fc427ed:38299-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef2268c469d4746ed529b52e9f45c475:info 2024-11-18T20:22:19,136 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:19,238 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6dbd1900[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741873_1056 to 127.0.0.1:36939 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:19,238 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@637afb1f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741868_1051 to 127.0.0.1:41917 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:19,686 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-18T20:22:19,687 WARN [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
2024-11-18T20:22:19,826 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-18T20:22:19,831 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-18T20:22:19,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-18T20:22:19,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-18T20:22:19,841 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-18T20:22:19,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a8eeeb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,AVAILABLE}
2024-11-18T20:22:19,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6842affb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-18T20:22:19,979 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25f6ac6c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/java.io.tmpdir/jetty-localhost-46331-hadoop-hdfs-3_4_1-tests_jar-_-any-16352313227079969179/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-18T20:22:19,980 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2586080f{HTTP/1.1, (http/1.1)}{localhost:46331}
2024-11-18T20:22:19,980 INFO [Time-limited test {}] server.Server(415): Started @131471ms
2024-11-18T20:22:19,982 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-18T20:22:20,313 WARN [Thread-984 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-18T20:22:20,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x375c16ee8779cd09 with lease ID 0x3ead2f9b91647e93: from storage DS-752959bd-3814-4dce-8848-a7add3965661 node DatanodeRegistration(127.0.0.1:32943, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=37841, infoSecurePort=0, ipcPort=44041, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-18T20:22:20,322 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x375c16ee8779cd09 with lease ID 0x3ead2f9b91647e93: from storage DS-4a13fe2d-9a3d-4aca-894a-3df24200b229 node DatanodeRegistration(127.0.0.1:32943, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=37841, infoSecurePort=0, ipcPort=44041, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-18T20:22:20,348 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-18T20:22:21,137 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-18T20:22:21,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6dbd1900[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741883_1066 to 127.0.0.1:35323 got java.net.ConnectException: Connection refused
at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:21,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741858_1041 (size=13591) 2024-11-18T20:22:21,687 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:22,239 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@637afb1f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40087, datanodeUuid=444b2f8f-adaa-4f36-aee7-d5f1d9cf7937, infoPort=41125, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741888_1071 to 127.0.0.1:35323 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:22,348 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:23,137 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:23,687 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:24,349 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:24,841 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:22:25,138 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:25,415 ERROR [FSHLog-0-hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData-prefix:5a964fc427ed,37903,1731961314859 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:25,416 WARN [FSHLog-0-hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData-prefix:5a964fc427ed,37903,1731961314859 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:25,416 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C37903%2C1731961314859:(num 1731961315159) roll requested 2024-11-18T20:22:25,416 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37903%2C1731961314859.1731961345416 2024-11-18T20:22:25,423 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:25,423 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:25,424 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:25,424 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:25,424 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:25,424 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961345416 2024-11-18T20:22:25,425 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:25,425 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:25,425 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 2024-11-18T20:22:25,425 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41125:41125),(127.0.0.1/127.0.0.1:37841:37841)] 2024-11-18T20:22:25,425 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 is not closed yet, will try archiving it next time 2024-11-18T20:22:25,426 WARN [IPC Server handler 3 on default port 33409 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-18T20:22:25,426 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 after 1ms 2024-11-18T20:22:25,688 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:26,349 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:27,689 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:28,350 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:29,429 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 after 4004ms 2024-11-18T20:22:29,689 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:30,319 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f705590[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32943, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=37841, infoSecurePort=0, ipcPort=44041, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741836_1012 to 127.0.0.1:36939 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:30,320 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40757799[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32943, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=37841, infoSecurePort=0, ipcPort=44041, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741832_1008 to 127.0.0.1:35323 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:30,337 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3e04089a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:41917,null,null]) java.net.ConnectException: Call From 5a964fc427ed/172.17.0.2 to localhost:38463 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T20:22:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741833_1021 (size=455) 2024-11-18T20:22:30,350 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:30,650 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961315627 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs/5a964fc427ed%2C38299%2C1731961314990.1731961315627 2024-11-18T20:22:30,652 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961335638 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs/5a964fc427ed%2C38299%2C1731961314990.1731961335638 2024-11-18T20:22:31,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:22:31,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:22:31,689 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:32,350 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,318 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40757799[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32943, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=37841, infoSecurePort=0, ipcPort=44041, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741833_1021 to 127.0.0.1:36939 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:33,318 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f705590[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32943, datanodeUuid=668a6dfd-9553-4de0-a7a1-42a381a2d7bc, infoPort=37841, infoSecurePort=0, ipcPort=44041, storageInfo=lv=-57;cid=testClusterID;nsid=2717690;c=1731961313479):Failed to transfer BP-1540393714-172.17.0.2-1731961313479:blk_1073741825_1001 to 127.0.0.1:36939 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:33,465 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.1731961353465 2024-11-18T20:22:33,468 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,468 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:33,469 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741890_1074 2024-11-18T20:22:33,469 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:33,473 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,473 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,474 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,474 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,474 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,474 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961337656 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961353465 2024-11-18T20:22:33,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741878_1061 (size=12911) 2024-11-18T20:22:33,476 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37841:37841),(127.0.0.1/127.0.0.1:41125:41125)] 2024-11-18T20:22:33,476 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961337656 is not closed yet, will try archiving it next time 2024-11-18T20:22:33,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38299 {}] 
regionserver.HRegion(8855): Flush requested on ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:33,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef2268c469d4746ed529b52e9f45c475 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:22:33,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/35615be5c7404373b736b96a557cc4e4 is 1080, key is row0013/info:/1731961353477/Put/seqid=0 2024-11-18T20:22:33,490 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,490 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:33,490 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741892_1076 2024-11-18T20:22:33,491 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:33,492 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:33,493 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK], DatanodeInfoWithStorage[127.0.0.1:32943,DS-752959bd-3814-4dce-8848-a7add3965661,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:33,493 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741893_1077 2024-11-18T20:22:33,493 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:33,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741894_1078 (size=8190) 2024-11-18T20:22:33,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741894_1078 (size=8190) 2024-11-18T20:22:33,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/35615be5c7404373b736b96a557cc4e4 2024-11-18T20:22:33,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/35615be5c7404373b736b96a557cc4e4 as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/35615be5c7404373b736b96a557cc4e4 2024-11-18T20:22:33,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/35615be5c7404373b736b96a557cc4e4, entries=3, sequenceid=66, filesize=8.0 K 2024-11-18T20:22:33,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for ef2268c469d4746ed529b52e9f45c475 in 37ms, sequenceid=66, compaction requested=false 2024-11-18T20:22:33,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef2268c469d4746ed529b52e9f45c475: 2024-11-18T20:22:33,519 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-18T20:22:33,519 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:22:33,519 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/1e5b78d56cf740b0be7b44158d0a0652 because midkey is the same as first or last row 2024-11-18T20:22:33,690 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(580): java.io.IOException: 
All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,690 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-18T20:22:33,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:22:33,707 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:22:33,707 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:22:33,707 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:33,707 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:33,707 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T20:22:33,707 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:22:33,707 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=886043761, stopped=false 2024-11-18T20:22:33,707 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5a964fc427ed,37903,1731961314859 2024-11-18T20:22:33,749 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:22:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:22:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:33,749 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:22:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:33,750 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:22:33,750 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:22:33,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
2024-11-18T20:22:33,751 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:22:33,752 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:22:33,752 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:22:33,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:33,752 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,38299,1731961314990' ***** 2024-11-18T20:22:33,752 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:22:33,752 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,35553,1731961316245' ***** 2024-11-18T20:22:33,752 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:22:33,752 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:22:33,752 INFO [RS:0;5a964fc427ed:38299 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:22:33,752 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:22:33,753 INFO [RS:0;5a964fc427ed:38299 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:22:33,753 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:22:33,753 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(3091): Received CLOSE for ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:33,754 INFO [RS:1;5a964fc427ed:35553 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:22:33,754 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:22:33,754 INFO [RS:1;5a964fc427ed:35553 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:22:33,754 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,35553,1731961316245 2024-11-18T20:22:33,754 INFO [RS:1;5a964fc427ed:35553 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:22:33,754 INFO [RS:1;5a964fc427ed:35553 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;5a964fc427ed:35553. 
2024-11-18T20:22:33,754 DEBUG [RS:1;5a964fc427ed:35553 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:22:33,754 DEBUG [RS:1;5a964fc427ed:35553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:33,754 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,35553,1731961316245; all regions closed. 2024-11-18T20:22:33,754 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,38299,1731961314990 2024-11-18T20:22:33,754 INFO [RS:0;5a964fc427ed:38299 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:22:33,754 INFO [RS:0;5a964fc427ed:38299 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5a964fc427ed:38299. 
2024-11-18T20:22:33,754 DEBUG [RS:0;5a964fc427ed:38299 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:22:33,754 DEBUG [RS:0;5a964fc427ed:38299 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:33,754 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ef2268c469d4746ed529b52e9f45c475, disabling compactions & flushes 2024-11-18T20:22:33,755 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:22:33,755 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:22:33,755 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,755 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:22:33,755 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:22:33,755 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:22:33,755 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:22:33,755 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. after waiting 0 ms 2024-11-18T20:22:33,755 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 
2024-11-18T20:22:33,755 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,755 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,755 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ef2268c469d4746ed529b52e9f45c475 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-18T20:22:33,755 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:22:33,755 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1325): Online Regions={ef2268c469d4746ed529b52e9f45c475=TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:22:33,755 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ef2268c469d4746ed529b52e9f45c475 2024-11-18T20:22:33,755 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,755 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:22:33,755 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:22:33,755 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,756 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:22:33,756 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:22:33,756 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:22:33,756 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,756 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-18T20:22:33,756 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,756 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 2024-11-18T20:22:33,757 ERROR [FSHLog-0-hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf-prefix:5a964fc427ed,38299,1731961314990.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,757 WARN [FSHLog-0-hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf-prefix:5a964fc427ed,38299,1731961314990.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,757 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C38299%2C1731961314990.meta:.meta(num 1731961316104) roll requested 2024-11-18T20:22:33,757 WARN [IPC Server handler 0 on default port 33409 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 has not been closed. Lease recovery is in progress. 
RecoveryId = 1079 for block blk_1073741837_1013 2024-11-18T20:22:33,757 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38299%2C1731961314990.meta.1731961353757.meta 2024-11-18T20:22:33,758 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 after 1ms 2024-11-18T20:22:33,770 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/402d33abf15a4e2d9fb2fd1c4aa9d8d9 is 1080, key is row0015/info:/1731961353483/Put/seqid=0 2024-11-18T20:22:33,772 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,772 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:33,772 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:59618 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741895_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data4]'}, localName='127.0.0.1:32943', datanodeUuid='668a6dfd-9553-4de0-a7a1-42a381a2d7bc', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741895_1080 to mirror 127.0.0.1:35323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:33,772 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:33,772 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741896_1081 2024-11-18T20:22:33,772 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32943,DS-752959bd-3814-4dce-8848-a7add3965661,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:33,772 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741895_1080 2024-11-18T20:22:33,773 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:59618 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741895_1080] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-18T20:22:33,773 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:59618 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741895_1080] {}] datanode.DataXceiver(331): 127.0.0.1:32943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59618 dst: /127.0.0.1:32943 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:33,773 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:33,773 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:33,789 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,789 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,789 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,789 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,790 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:33,790 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961353757.meta 2024-11-18T20:22:33,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741898_1083 (size=14660) 2024-11-18T20:22:33,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741898_1083 (size=14660) 2024-11-18T20:22:33,802 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/402d33abf15a4e2d9fb2fd1c4aa9d8d9 2024-11-18T20:22:33,810 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,811 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41917,DS-3ca67442-7f0f-47c2-84b5-17dbd62cb838,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,811 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta 2024-11-18T20:22:33,811 WARN [IPC Server handler 0 on default port 33409 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1084 for block blk_1073741834_1010 2024-11-18T20:22:33,812 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta after 1ms 2024-11-18T20:22:33,812 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/.tmp/info/402d33abf15a4e2d9fb2fd1c4aa9d8d9 as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/402d33abf15a4e2d9fb2fd1c4aa9d8d9 2024-11-18T20:22:33,822 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/402d33abf15a4e2d9fb2fd1c4aa9d8d9, entries=9, sequenceid=78, filesize=14.3 K 2024-11-18T20:22:33,823 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for ef2268c469d4746ed529b52e9f45c475 in 68ms, sequenceid=78, compaction requested=true 2024-11-18T20:22:33,826 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41125:41125),(127.0.0.1/127.0.0.1:37841:37841)] 2024-11-18T20:22:33,827 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta is not closed yet, will try archiving it next time 2024-11-18T20:22:33,831 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e440d3a165ae4935bb85bdd56323f7ca, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/c64d931685614ed9b7138f12a1a7cd63, 
hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/781eb742df2f4ff285b6fafd86862768] to archive 2024-11-18T20:22:33,832 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T20:22:33,840 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e440d3a165ae4935bb85bdd56323f7ca to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e440d3a165ae4935bb85bdd56323f7ca 2024-11-18T20:22:33,842 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3e16694c77794964a18fc9174056bbe5 2024-11-18T20:22:33,851 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/info/74350070cad746a9a593b5744684daca is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475./info:regioninfo/1731961316777/Put/seqid=0 2024-11-18T20:22:33,851 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/3c97f5bdb74e4009bb24c2c3c98b703c 2024-11-18T20:22:33,853 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/e6e1a6a389f94150bc3a2a7ffcfbb9dd 2024-11-18T20:22:33,856 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/c64d931685614ed9b7138f12a1a7cd63 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/c64d931685614ed9b7138f12a1a7cd63 2024-11-18T20:22:33,858 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/781eb742df2f4ff285b6fafd86862768 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/info/781eb742df2f4ff285b6fafd86862768 2024-11-18T20:22:33,859 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5a964fc427ed:37903 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T20:22:33,859 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e440d3a165ae4935bb85bdd56323f7ca=10347, 3e16694c77794964a18fc9174056bbe5=12506, 3c97f5bdb74e4009bb24c2c3c98b703c=17994, e6e1a6a389f94150bc3a2a7ffcfbb9dd=6027, c64d931685614ed9b7138f12a1a7cd63=6027, 781eb742df2f4ff285b6fafd86862768=6027] 2024-11-18T20:22:33,860 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36939 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:33,860 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:55674 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741899_1085 to mirror 127.0.0.1:36939 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:33,860 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK]) is bad. 2024-11-18T20:22:33,860 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:55674 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:33,860 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741899_1085 2024-11-18T20:22:33,860 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:55674 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55674 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:33,861 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36939,DS-d0c04b81-74a9-4beb-838d-f4120bdc9626,DISK] 2024-11-18T20:22:33,866 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:33,866 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:55686 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6]'}, localName='127.0.0.1:40087', datanodeUuid='444b2f8f-adaa-4f36-aee7-d5f1d9cf7937', xmitsInProgress=0}:Exception transferring block BP-1540393714-172.17.0.2-1731961313479:blk_1073741900_1086 to mirror 127.0.0.1:35323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:33,867 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1540393714-172.17.0.2-1731961313479:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40087,DS-d0680cf5-7a70-4b30-83db-7921f3439c93,DISK], DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK]) is bad. 2024-11-18T20:22:33,868 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1540393714-172.17.0.2-1731961313479:blk_1073741900_1086 2024-11-18T20:22:33,868 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:55686 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:22:33,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-190297126_22 at /127.0.0.1:55686 [Receiving block BP-1540393714-172.17.0.2-1731961313479:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:40087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55686 dst: /127.0.0.1:40087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:33,868 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35323,DS-fd8e96a5-f259-40da-bba6-470f04c9a02a,DISK] 2024-11-18T20:22:33,880 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.1731961337656 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs/5a964fc427ed%2C38299%2C1731961314990.1731961337656 2024-11-18T20:22:33,887 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ef2268c469d4746ed529b52e9f45c475/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-18T20:22:33,888 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:22:33,888 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ef2268c469d4746ed529b52e9f45c475: Waiting for close lock at 1731961353754Running coprocessor pre-close hooks at 1731961353754Disabling compacts and flushes for region at 1731961353754Disabling writes for close at 1731961353755 (+1 ms)Obtaining lock to block concurrent updates at 1731961353755Preparing flush snapshotting stores in ef2268c469d4746ed529b52e9f45c475 at 1731961353755Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1731961353755Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 
at 1731961353756 (+1 ms)Flushing ef2268c469d4746ed529b52e9f45c475/info: creating writer at 1731961353756Flushing ef2268c469d4746ed529b52e9f45c475/info: appending metadata at 1731961353770 (+14 ms)Flushing ef2268c469d4746ed529b52e9f45c475/info: closing flushed file at 1731961353770Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37bcdd66: reopening flushed file at 1731961353811 (+41 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for ef2268c469d4746ed529b52e9f45c475 in 68ms, sequenceid=78, compaction requested=true at 1731961353823 (+12 ms)Writing region close event to WAL at 1731961353862 (+39 ms)Running coprocessor post-close hooks at 1731961353888 (+26 ms)Closed at 1731961353888 2024-11-18T20:22:33,888 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731961316385.ef2268c469d4746ed529b52e9f45c475. 2024-11-18T20:22:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741901_1087 (size=7089) 2024-11-18T20:22:33,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741901_1087 (size=7089) 2024-11-18T20:22:33,955 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:22:34,156 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:22:34,293 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/info/74350070cad746a9a593b5744684daca 2024-11-18T20:22:34,321 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/ns/ab9aab1189814ed28a80eebd4be8ceee is 43, key is default/ns:d/1731961316186/Put/seqid=0 2024-11-18T20:22:34,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741902_1088 (size=5153) 2024-11-18T20:22:34,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741902_1088 (size=5153) 2024-11-18T20:22:34,340 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/ns/ab9aab1189814ed28a80eebd4be8ceee 2024-11-18T20:22:34,354 INFO [regionserver/5a964fc427ed:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:22:34,356 DEBUG [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:22:34,371 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/table/2f1aee59c22c4451a05216c9aeca54ac is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731961316794/Put/seqid=0 2024-11-18T20:22:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741903_1089 (size=5424) 2024-11-18T20:22:34,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741903_1089 (size=5424) 2024-11-18T20:22:34,387 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/table/2f1aee59c22c4451a05216c9aeca54ac 2024-11-18T20:22:34,396 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/info/74350070cad746a9a593b5744684daca as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/info/74350070cad746a9a593b5744684daca 2024-11-18T20:22:34,397 INFO [regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:22:34,397 INFO [regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:22:34,404 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/info/74350070cad746a9a593b5744684daca, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T20:22:34,406 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/ns/ab9aab1189814ed28a80eebd4be8ceee as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/ns/ab9aab1189814ed28a80eebd4be8ceee 2024-11-18T20:22:34,414 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/ns/ab9aab1189814ed28a80eebd4be8ceee, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:22:34,415 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/.tmp/table/2f1aee59c22c4451a05216c9aeca54ac as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/table/2f1aee59c22c4451a05216c9aeca54ac 2024-11-18T20:22:34,423 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/table/2f1aee59c22c4451a05216c9aeca54ac, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T20:22:34,424 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 668ms, sequenceid=11, compaction requested=false 2024-11-18T20:22:34,431 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:22:34,432 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:22:34,432 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:22:34,432 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961353755Running coprocessor pre-close hooks at 1731961353755Disabling compacts and flushes for region at 1731961353755Disabling writes for close at 1731961353756 (+1 ms)Obtaining lock to block concurrent updates at 1731961353756Preparing flush snapshotting stores in 1588230740 at 1731961353756Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731961353757 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731961353828 (+71 ms)Flushing 1588230740/info: creating writer at 1731961353828Flushing 1588230740/info: appending metadata at 1731961353850 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731961353850Flushing 1588230740/ns: creating writer at 1731961354301 (+451 ms)Flushing 1588230740/ns: appending metadata at 1731961354321 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731961354321Flushing 1588230740/table: creating writer at 1731961354350 (+29 ms)Flushing 1588230740/table: appending metadata at 1731961354370 (+20 ms)Flushing 1588230740/table: closing flushed file at 1731961354370Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ae8c28c: reopening flushed file at 1731961354395 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41d6fd39: reopening flushed file at 1731961354405 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@352ff5f3: reopening flushed file at 1731961354414 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 668ms, sequenceid=11, compaction requested=false at 1731961354424 (+10 ms)Writing region close event to WAL at 1731961354427 (+3 ms)Running coprocessor post-close hooks at 1731961354432 (+5 ms)Closed at 1731961354432 2024-11-18T20:22:34,433 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:22:34,492 INFO [regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:22:34,492 INFO 
[regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:22:34,556 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,38299,1731961314990; all regions closed. 2024-11-18T20:22:34,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:34,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:34,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:34,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:34,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741897_1082 (size=825) 2024-11-18T20:22:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741897_1082 (size=825) 2024-11-18T20:22:35,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:22:35,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:22:35,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:22:35,494 INFO [regionserver/5a964fc427ed:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:22:36,234 INFO [master/5a964fc427ed:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T20:22:36,234 INFO [master/5a964fc427ed:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-18T20:22:36,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:22:36,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741835_1011 (size=393) 2024-11-18T20:22:37,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:22:37,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:22:37,759 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 after 4003ms 2024-11-18T20:22:37,813 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta after 4002ms 2024-11-18T20:22:38,756 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T20:22:38,759 DEBUG [RS:1;5a964fc427ed:35553 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs 2024-11-18T20:22:38,759 INFO [RS:1;5a964fc427ed:35553 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C35553%2C1731961316245:(num 1731961316487) 2024-11-18T20:22:38,759 DEBUG [RS:1;5a964fc427ed:35553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:38,759 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:22:38,760 INFO [RS:1;5a964fc427ed:35553 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:22:38,760 INFO [RS:1;5a964fc427ed:35553 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:22:38,760 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:22:38,760 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:22:38,760 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:22:38,760 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T20:22:38,760 INFO [RS:1;5a964fc427ed:35553 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:22:38,760 INFO [RS:1;5a964fc427ed:35553 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35553 2024-11-18T20:22:38,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:38,775 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,35553,1731961316245 2024-11-18T20:22:38,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:22:38,775 INFO [RS:1;5a964fc427ed:35553 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:22:38,776 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,35553,1731961316245] 2024-11-18T20:22:38,791 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,35553,1731961316245 already deleted, retry=false 2024-11-18T20:22:38,791 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,35553,1731961316245 expired; onlineServers=1 2024-11-18T20:22:38,883 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:22:38,883 INFO [RS:1;5a964fc427ed:35553 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:22:38,883 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10150ca23c40002, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:22:38,883 INFO [RS:1;5a964fc427ed:35553 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,35553,1731961316245; zookeeper connection closed. 2024-11-18T20:22:38,883 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@253bbd21 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@253bbd21 2024-11-18T20:22:38,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:38,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:38,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:38,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:38,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:38,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:38,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:38,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741878_1061 (size=12911) 2024-11-18T20:22:39,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741840_1022 (size=2431) 2024-11-18T20:22:39,435 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:22:39,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,457 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,457 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,457 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:22:39,558 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T20:22:39,562 DEBUG [RS:0;5a964fc427ed:38299 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs 2024-11-18T20:22:39,562 INFO [RS:0;5a964fc427ed:38299 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C38299%2C1731961314990.meta:.meta(num 1731961353757) 2024-11-18T20:22:39,563 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:39,563 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:39,563 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:39,563 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:39,563 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741891_1075 (size=14682) 2024-11-18T20:22:39,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741891_1075 (size=14682) 2024-11-18T20:22:39,568 DEBUG [RS:0;5a964fc427ed:38299 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs 2024-11-18T20:22:39,568 INFO [RS:0;5a964fc427ed:38299 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C38299%2C1731961314990:(num 1731961353465) 2024-11-18T20:22:39,568 DEBUG [RS:0;5a964fc427ed:38299 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:39,568 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:22:39,568 INFO [RS:0;5a964fc427ed:38299 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:22:39,569 INFO [RS:0;5a964fc427ed:38299 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:22:39,569 INFO [RS:0;5a964fc427ed:38299 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:22:39,569 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:22:39,569 INFO [RS:0;5a964fc427ed:38299 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38299 2024-11-18T20:22:39,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,38299,1731961314990 2024-11-18T20:22:39,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:22:39,616 INFO [RS:0;5a964fc427ed:38299 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:22:39,624 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,38299,1731961314990] 2024-11-18T20:22:39,632 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,38299,1731961314990 already deleted, retry=false 2024-11-18T20:22:39,632 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,38299,1731961314990 expired; onlineServers=0 2024-11-18T20:22:39,632 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5a964fc427ed,37903,1731961314859' ***** 2024-11-18T20:22:39,632 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:22:39,633 INFO [M:0;5a964fc427ed:37903 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:22:39,633 INFO [M:0;5a964fc427ed:37903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:22:39,633 DEBUG [M:0;5a964fc427ed:37903 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:22:39,633 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:22:39,633 DEBUG [M:0;5a964fc427ed:37903 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:22:39,633 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961315415 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961315415,5,FailOnTimeoutGroup] 2024-11-18T20:22:39,633 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961315415 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961315415,5,FailOnTimeoutGroup] 2024-11-18T20:22:39,633 INFO [M:0;5a964fc427ed:37903 {}] hbase.ChoreService(370): Chore service for: master/5a964fc427ed:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:22:39,633 INFO [M:0;5a964fc427ed:37903 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:22:39,633 DEBUG [M:0;5a964fc427ed:37903 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:22:39,633 INFO [M:0;5a964fc427ed:37903 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:22:39,633 INFO [M:0;5a964fc427ed:37903 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:22:39,633 INFO [M:0;5a964fc427ed:37903 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:22:39,634 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:22:39,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:22:39,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:39,641 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-18T20:22:39,642 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-18T20:22:39,642 INFO [M:0;5a964fc427ed:37903 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/.lastflushedseqids 2024-11-18T20:22:39,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741904_1090 (size=130) 2024-11-18T20:22:39,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741904_1090 (size=130) 2024-11-18T20:22:39,671 INFO [M:0;5a964fc427ed:37903 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:22:39,671 INFO [M:0;5a964fc427ed:37903 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:22:39,672 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-18T20:22:39,672 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:22:39,672 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:22:39,672 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:22:39,672 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:22:39,672 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-18T20:22:39,698 DEBUG [M:0;5a964fc427ed:37903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64688fc660a24a8f89c7d28e6cc21caa is 82, key is hbase:meta,,1/info:regioninfo/1731961316141/Put/seqid=0 2024-11-18T20:22:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741905_1091 (size=5672) 2024-11-18T20:22:39,715 INFO [M:0;5a964fc427ed:37903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64688fc660a24a8f89c7d28e6cc21caa 2024-11-18T20:22:39,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741905_1091 (size=5672) 2024-11-18T20:22:39,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:22:39,724 INFO [RS:0;5a964fc427ed:38299 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:22:39,724 INFO [RS:0;5a964fc427ed:38299 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,38299,1731961314990; zookeeper connection closed. 
2024-11-18T20:22:39,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38299-0x10150ca23c40001, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:22:39,727 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@175ebc4b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@175ebc4b 2024-11-18T20:22:39,727 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-18T20:22:39,752 DEBUG [M:0;5a964fc427ed:37903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d4c60f046d745c893c41259f139c86b is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961316802/Put/seqid=0 2024-11-18T20:22:39,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:39,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741906_1092 (size=6255) 2024-11-18T20:22:39,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741906_1092 (size=6255) 2024-11-18T20:22:39,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:40,183 INFO [M:0;5a964fc427ed:37903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d4c60f046d745c893c41259f139c86b 2024-11-18T20:22:40,195 INFO [M:0;5a964fc427ed:37903 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d4c60f046d745c893c41259f139c86b 2024-11-18T20:22:40,210 DEBUG [M:0;5a964fc427ed:37903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/17303646c3a347a5b830453d481e669e is 69, key is 5a964fc427ed,35553,1731961316245/rs:state/1731961316327/Put/seqid=0 2024-11-18T20:22:40,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741907_1093 (size=5224) 2024-11-18T20:22:40,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741907_1093 (size=5224) 2024-11-18T20:22:40,215 INFO [M:0;5a964fc427ed:37903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/17303646c3a347a5b830453d481e669e 2024-11-18T20:22:40,236 DEBUG [M:0;5a964fc427ed:37903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94a656e055ce46d884b9277449ab57a7 is 52, key is load_balancer_on/state:d/1731961316227/Put/seqid=0 2024-11-18T20:22:40,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741908_1094 (size=5056) 2024-11-18T20:22:40,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741908_1094 (size=5056) 2024-11-18T20:22:40,242 INFO [M:0;5a964fc427ed:37903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94a656e055ce46d884b9277449ab57a7 2024-11-18T20:22:40,247 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64688fc660a24a8f89c7d28e6cc21caa as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/64688fc660a24a8f89c7d28e6cc21caa 2024-11-18T20:22:40,252 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/64688fc660a24a8f89c7d28e6cc21caa, entries=8, sequenceid=60, filesize=5.5 K 2024-11-18T20:22:40,253 
DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d4c60f046d745c893c41259f139c86b as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4d4c60f046d745c893c41259f139c86b 2024-11-18T20:22:40,258 INFO [M:0;5a964fc427ed:37903 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d4c60f046d745c893c41259f139c86b 2024-11-18T20:22:40,259 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4d4c60f046d745c893c41259f139c86b, entries=6, sequenceid=60, filesize=6.1 K 2024-11-18T20:22:40,260 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/17303646c3a347a5b830453d481e669e as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/17303646c3a347a5b830453d481e669e 2024-11-18T20:22:40,265 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/17303646c3a347a5b830453d481e669e, entries=2, sequenceid=60, filesize=5.1 K 2024-11-18T20:22:40,266 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94a656e055ce46d884b9277449ab57a7 as hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/94a656e055ce46d884b9277449ab57a7 2024-11-18T20:22:40,271 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/94a656e055ce46d884b9277449ab57a7, entries=1, sequenceid=60, filesize=4.9 K 2024-11-18T20:22:40,273 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 601ms, sequenceid=60, compaction requested=false 2024-11-18T20:22:40,274 INFO [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:22:40,274 DEBUG [M:0;5a964fc427ed:37903 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961359672Disabling compacts and flushes for region at 1731961359672Disabling writes for close at 1731961359672Obtaining lock to block concurrent updates at 1731961359672Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961359672Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731961359673 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961359674 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961359674Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961359697 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961359697Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961359723 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961359752 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961359752Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961360196 (+444 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961360209 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961360209Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961360220 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961360235 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961360235Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51dfe0c2: reopening flushed file at 1731961360246 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2827c27a: reopening flushed file at 1731961360252 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17d7409c: reopening flushed file at 1731961360259 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72d3600: reopening flushed file at 1731961360265 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 601ms, sequenceid=60, compaction requested=false at 1731961360273 (+8 ms)Writing region close event to WAL at 1731961360274 (+1 ms)Closed at 1731961360274 2024-11-18T20:22:40,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:40,275 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:40,275 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:40,275 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:40,275 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741889_1072 (size=1045) 2024-11-18T20:22:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40087 is added to blk_1073741889_1072 (size=1045) 2024-11-18T20:22:40,341 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@38eb7426 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block 
(block=BP-1540393714-172.17.0.2-1731961313479:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41917,null,null]) java.net.ConnectException: Call From 5a964fc427ed/172.17.0.2 to localhost:38463 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T20:22:40,438 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/WALs/5a964fc427ed,37903,1731961314859/5a964fc427ed%2C37903%2C1731961314859.1731961315159 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/oldWALs/5a964fc427ed%2C37903%2C1731961314859.1731961315159 2024-11-18T20:22:40,444 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/MasterData/oldWALs/5a964fc427ed%2C37903%2C1731961314859.1731961315159 to hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/oldWALs/5a964fc427ed%2C37903%2C1731961314859.1731961315159$masterlocalwal$ 2024-11-18T20:22:40,444 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:22:40,444 INFO [M:0;5a964fc427ed:37903 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:22:40,444 INFO [M:0;5a964fc427ed:37903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37903 2024-11-18T20:22:40,445 INFO [M:0;5a964fc427ed:37903 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:22:40,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:22:40,574 INFO [M:0;5a964fc427ed:37903 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:22:40,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37903-0x10150ca23c40000, quorum=127.0.0.1:58477, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:22:40,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25f6ac6c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:40,576 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2586080f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:40,577 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:40,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6842affb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:40,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a8eeeb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:40,578 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:40,578 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:22:40,578 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1540393714-172.17.0.2-1731961313479 (Datanode Uuid 668a6dfd-9553-4de0-a7a1-42a381a2d7bc) service to localhost/127.0.0.1:33409 2024-11-18T20:22:40,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:40,578 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5e2bf7d5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41917,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:38463 , LocalHost:localPort 5a964fc427ed/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T20:22:40,579 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5e2bf7d5 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1540393714-172.17.0.2-1731961313479:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:32943,null,null], DatanodeInfoWithStorage[127.0.0.1:41917,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1540393714-172.17.0.2-1731961313479 2024-11-18T20:22:40,579 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5e2bf7d5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:32943,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1540393714-172.17.0.2-1731961313479 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:40,579 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5e2bf7d5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41917,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1540393714-172.17.0.2-1731961313479 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:40,579 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5e2bf7d5 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:32943,null,null], DatanodeInfoWithStorage[127.0.0.1:41917,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1540393714-172.17.0.2-1731961313479:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:32943,null,null], DatanodeInfoWithStorage[127.0.0.1:41917,null,null]] 2024-11-18T20:22:40,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data3/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:40,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data4/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:40,580 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:40,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65429201{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:40,583 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4cbb1003{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:40,583 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:40,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5dc4ef73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:40,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@5cac6b83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:40,585 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:40,585 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:22:40,585 WARN [BP-1540393714-172.17.0.2-1731961313479 heartbeating to localhost/127.0.0.1:33409 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1540393714-172.17.0.2-1731961313479 (Datanode Uuid 444b2f8f-adaa-4f36-aee7-d5f1d9cf7937) service to localhost/127.0.0.1:33409 2024-11-18T20:22:40,585 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:40,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data5/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:40,586 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/cluster_7f08645b-f697-083d-ebeb-4f8db9a3e3e4/data/data6/current/BP-1540393714-172.17.0.2-1731961313479 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:40,586 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:40,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2606b08f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:22:40,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c053989{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:40,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:40,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3150e6db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:40,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:40,603 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:22:40,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:22:40,639 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:42479 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:33409 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:33409 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33409 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33409 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33409 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f0cd8bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33409 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33409 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42479 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f0cd8bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=410 (was 648), ProcessCount=11 (was 11), AvailableMemoryMB=3684 (was 4105) 2024-11-18T20:22:40,647 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=410, ProcessCount=11, AvailableMemoryMB=3684 2024-11-18T20:22:40,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.log.dir so I do NOT create it in target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57613257-4118-95b9-dd0f-d98ca07f08b1/hadoop.tmp.dir so I do NOT create it in target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4, deleteOnExit=true 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/test.cache.data in system properties and HBase conf 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:22:40,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:22:40,648 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:22:40,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:22:40,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:22:40,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:22:40,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:22:40,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:22:40,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:22:40,663 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:22:40,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:40,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:40,911 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:40,916 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:40,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:40,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:40,918 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:22:40,918 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:40,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0e18a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:40,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45628471{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:41,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19160285{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir/jetty-localhost-32791-hadoop-hdfs-3_4_1-tests_jar-_-any-17815577047936348746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:22:41,012 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40c321ed{HTTP/1.1, (http/1.1)}{localhost:32791} 2024-11-18T20:22:41,012 INFO [Time-limited test {}] server.Server(415): Started @152503ms 2024-11-18T20:22:41,024 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:22:41,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:41,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:41,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:41,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:41,227 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:22:41,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7353ad08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:41,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a928dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:41,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c3df9c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir/jetty-localhost-33927-hadoop-hdfs-3_4_1-tests_jar-_-any-18000765020381308079/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:41,321 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@623a52f4{HTTP/1.1, (http/1.1)}{localhost:33927} 2024-11-18T20:22:41,322 INFO [Time-limited test {}] server.Server(415): Started @152813ms 2024-11-18T20:22:41,323 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:22:41,357 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:41,362 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:41,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:41,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:41,362 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:22:41,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b27dfb0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:41,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8825f29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:41,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5921b3be{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir/jetty-localhost-36395-hadoop-hdfs-3_4_1-tests_jar-_-any-11680790421759777671/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:41,457 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1118a265{HTTP/1.1, (http/1.1)}{localhost:36395} 2024-11-18T20:22:41,457 INFO [Time-limited test {}] server.Server(415): Started @152949ms 2024-11-18T20:22:41,458 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:22:41,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:41,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:41,940 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data1/current/BP-286708197-172.17.0.2-1731961360674/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:41,940 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data2/current/BP-286708197-172.17.0.2-1731961360674/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:41,960 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:22:41,962 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x269b70b7ea29e3ee with lease ID 0xd6797ffc66fbcf4: Processing first storage report for DS-72442cc8-041e-491f-88ed-391a71633f17 from datanode DatanodeRegistration(127.0.0.1:35369, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=45057, infoSecurePort=0, ipcPort=43691, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674) 2024-11-18T20:22:41,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x269b70b7ea29e3ee with lease ID 0xd6797ffc66fbcf4: from storage DS-72442cc8-041e-491f-88ed-391a71633f17 node DatanodeRegistration(127.0.0.1:35369, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=45057, infoSecurePort=0, ipcPort=43691, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:41,962 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x269b70b7ea29e3ee with lease ID 0xd6797ffc66fbcf4: Processing first storage report for DS-18c31870-3a2c-48b5-ad95-07e09b10b23d from datanode DatanodeRegistration(127.0.0.1:35369, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=45057, infoSecurePort=0, ipcPort=43691, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674) 2024-11-18T20:22:41,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x269b70b7ea29e3ee with lease ID 0xd6797ffc66fbcf4: from storage DS-18c31870-3a2c-48b5-ad95-07e09b10b23d node DatanodeRegistration(127.0.0.1:35369, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=45057, infoSecurePort=0, ipcPort=43691, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:42,070 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data3/current/BP-286708197-172.17.0.2-1731961360674/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:42,070 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data4/current/BP-286708197-172.17.0.2-1731961360674/current, will proceed with Du for space computation calculation, 2024-11-18T20:22:42,088 WARN [Thread-1184 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:22:42,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf609f94f3f39e8ef with lease ID 0xd6797ffc66fbcf5: Processing first storage report for DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9 from datanode DatanodeRegistration(127.0.0.1:35743, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=37123, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674) 2024-11-18T20:22:42,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf609f94f3f39e8ef with lease ID 0xd6797ffc66fbcf5: from storage DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9 node DatanodeRegistration(127.0.0.1:35743, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=37123, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:42,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf609f94f3f39e8ef with lease ID 0xd6797ffc66fbcf5: Processing first storage report for DS-b7d9f6fe-2a27-44e1-8e2e-528f765c3628 from datanode DatanodeRegistration(127.0.0.1:35743, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=37123, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674) 2024-11-18T20:22:42,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf609f94f3f39e8ef with lease ID 0xd6797ffc66fbcf5: from storage DS-b7d9f6fe-2a27-44e1-8e2e-528f765c3628 node DatanodeRegistration(127.0.0.1:35743, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=37123, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:42,191 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2 2024-11-18T20:22:42,195 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/zookeeper_0, clientPort=62599, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:22:42,196 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62599 2024-11-18T20:22:42,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:22:42,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:22:42,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:22:42,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:22:42,210 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa with version=8 2024-11-18T20:22:42,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase-staging 2024-11-18T20:22:42,212 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:22:42,212 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:22:42,212 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:22:42,212 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:22:42,212 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:22:42,212 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:22:42,212 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:22:42,212 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:22:42,213 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44109 2024-11-18T20:22:42,214 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44109 connecting to ZooKeeper ensemble=127.0.0.1:62599 2024-11-18T20:22:42,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441090x0, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-18T20:22:42,249 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44109-0x10150cadcbb0000 connected 2024-11-18T20:22:42,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:22:42,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:22:42,311 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:22:42,311 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa, hbase.cluster.distributed=false 2024-11-18T20:22:42,313 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:22:42,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44109 2024-11-18T20:22:42,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44109 2024-11-18T20:22:42,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44109 2024-11-18T20:22:42,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44109 2024-11-18T20:22:42,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44109 2024-11-18T20:22:42,334 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:22:42,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:22:42,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:22:42,335 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:22:42,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:22:42,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:22:42,335 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:22:42,335 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:22:42,336 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44025 2024-11-18T20:22:42,337 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44025 connecting to ZooKeeper ensemble=127.0.0.1:62599 2024-11-18T20:22:42,338 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:22:42,339 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:22:42,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440250x0, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:22:42,349 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44025-0x10150cadcbb0001 connected 2024-11-18T20:22:42,349 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:22:42,350 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:22:42,350 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:22:42,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:22:42,352 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:22:42,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44025 2024-11-18T20:22:42,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44025 2024-11-18T20:22:42,353 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44025 2024-11-18T20:22:42,353 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44025 2024-11-18T20:22:42,353 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44025 2024-11-18T20:22:42,367 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5a964fc427ed:44109 2024-11-18T20:22:42,367 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5a964fc427ed,44109,1731961362211 2024-11-18T20:22:42,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:22:42,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:22:42,374 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5a964fc427ed,44109,1731961362211 2024-11-18T20:22:42,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:22:42,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,383 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:22:42,384 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5a964fc427ed,44109,1731961362211 from backup master directory 2024-11-18T20:22:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5a964fc427ed,44109,1731961362211 2024-11-18T20:22:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:22:42,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:22:42,391 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
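The records above show the master and regionserver placing ZooKeeper watches on znodes such as /hbase/master and /hbase/running before those znodes exist, then reacting to NodeCreated and NodeChildrenChanged events. The following is a minimal, illustrative sketch of that watch-then-react pattern using the plain Apache ZooKeeper client; the ensemble address and znode path are taken from the log, but the class and helper names are assumptions and this is not HBase's actual ZKWatcher/ZKUtil code.

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch: watch a znode that may not exist yet and react when it is created.
    public class ZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Ensemble address taken from the log (quorum=127.0.0.1:62599).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62599", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();  // "Received ZooKeeper Event ... state=SyncConnected"
          }
        });
        connected.await();

        Watcher masterWatcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (event.getType() == Watcher.Event.EventType.NodeCreated) {
              System.out.println("znode created: " + event.getPath());
            }
            rewatch(zk, this, event.getPath());  // ZooKeeper watches are one-shot
          }
        };
        // exists() registers the watch even when the znode is absent, which is what
        // "Set watcher on znode that does not yet exist, /hbase/master" refers to.
        zk.exists("/hbase/master", masterWatcher);
        Thread.sleep(60_000);
        zk.close();
      }

      private static void rewatch(ZooKeeper zk, Watcher w, String path) {
        try {
          if (path != null) {
            zk.exists(path, w);
          }
        } catch (KeeperException | InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }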
2024-11-18T20:22:42,391 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5a964fc427ed,44109,1731961362211
2024-11-18T20:22:42,399 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/hbase.id] with ID: e152c6e4-f235-4cba-b812-4b2ab0ab040d
2024-11-18T20:22:42,399 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/.tmp/hbase.id
2024-11-18T20:22:42,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741826_1002 (size=42)
2024-11-18T20:22:42,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741826_1002 (size=42)
2024-11-18T20:22:42,406 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/.tmp/hbase.id]:[hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/hbase.id]
2024-11-18T20:22:42,418 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-18T20:22:42,418 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-18T20:22:42,420 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
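The FSUtils records above describe writing the cluster ID to a temporary file under .tmp and then moving it to its final location, so a reader never observes a half-written hbase.id. Below is a hedged sketch of that write-temp-then-rename idiom with the stock Hadoop FileSystem API; the paths and ID mirror the log, but the helper itself is illustrative and not HBase's FSUtils implementation.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the "write to .tmp, then rename into place" idiom for hbase.id.
    public class ClusterIdFileSketch {
      static void writeClusterId(Configuration conf, Path rootDir, String clusterId)
          throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path idFile = new Path(rootDir, "hbase.id");
        Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");

        // Write the content to the temporary location first.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // Then move it to the target location in a single rename, so readers only
        // ever see a complete file.
        if (!fs.rename(tmpFile, idFile)) {
          throw new IOException("Failed to rename " + tmpFile + " to " + idFile);
        }
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Root directory and cluster ID taken from the log records above.
        Path rootDir = new Path("hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa");
        writeClusterId(conf, rootDir, "e152c6e4-f235-4cba-b812-4b2ab0ab040d");
      }
    }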
2024-11-18T20:22:42,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:22:42,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:22:42,439 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:22:42,440 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:22:42,440 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:22:42,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:22:42,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:22:42,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store 2024-11-18T20:22:42,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:22:42,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:22:42,456 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:22:42,457 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:22:42,457 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:22:42,457 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:22:42,457 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:22:42,457 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:22:42,457 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
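The HRegion close journal above follows a fixed order: disable compactions and flushes, wait (with a time limit) for the close lock, then disable updates and close the region. As a rough illustration of what "Time limited wait for close lock ... Acquired close lock after waiting 0 ms" corresponds to, here is a timed write-lock acquisition with a plain ReentrantReadWriteLock; the class, timeout value, and structure are assumptions for illustration, not the actual HRegion code.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Sketch of a time-limited close-lock acquisition like the HRegion close journal above.
    public class CloseLockSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();

      // Readers/writers of the region would hold closeLock.readLock(); close() takes the write lock.
      boolean close(long timeoutMs) throws InterruptedException {
        long start = System.currentTimeMillis();
        // Assumed timeout; the real wait period is configuration-driven.
        if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
          System.out.println("Failed to acquire close lock within " + timeoutMs + " ms");
          return false;
        }
        try {
          long waited = System.currentTimeMillis() - start;
          System.out.println("Acquired close lock after waiting " + waited + " ms");
          // ... disable updates, close stores, write the region close event to the WAL ...
          return true;
        } finally {
          closeLock.writeLock().unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        new CloseLockSketch().close(1000);
      }
    }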
2024-11-18T20:22:42,457 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961362457Disabling compacts and flushes for region at 1731961362457Disabling writes for close at 1731961362457Writing region close event to WAL at 1731961362457Closed at 1731961362457 2024-11-18T20:22:42,458 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/.initializing 2024-11-18T20:22:42,458 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211 2024-11-18T20:22:42,460 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C44109%2C1731961362211, suffix=, logDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211, archiveDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/oldWALs, maxLogs=10 2024-11-18T20:22:42,461 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44109%2C1731961362211.1731961362461 2024-11-18T20:22:42,466 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 2024-11-18T20:22:42,468 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37123:37123),(127.0.0.1/127.0.0.1:45057:45057)] 2024-11-18T20:22:42,468 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:22:42,468 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:22:42,469 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,469 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:22:42,471 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:42,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:42,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:22:42,473 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:42,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:22:42,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:22:42,474 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:42,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:22:42,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:22:42,476 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:42,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:22:42,476 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,477 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,477 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,478 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,479 DEBUG [master/5a964fc427ed:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,479 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:22:42,480 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:22:42,482 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:22:42,483 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759262, jitterRate=-0.03454884886741638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:22:42,483 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961362469Initializing all the Stores at 1731961362470 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961362470Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961362470Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961362470Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961362470Cleaning up temporary data from old regions at 1731961362479 (+9 ms)Region opened successfully at 1731961362483 (+4 ms) 2024-11-18T20:22:42,484 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:22:42,488 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6129458a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:22:42,489 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:22:42,489 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:22:42,489 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:22:42,489 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:22:42,490 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:22:42,490 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:22:42,490 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:22:42,492 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:22:42,493 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:22:42,499 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:22:42,499 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:22:42,500 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:22:42,507 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:22:42,507 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:22:42,508 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:22:42,515 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:22:42,516 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:22:42,524 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:22:42,526 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:22:42,532 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:22:42,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:22:42,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:22:42,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,541 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5a964fc427ed,44109,1731961362211, sessionid=0x10150cadcbb0000, setting cluster-up flag (Was=false) 2024-11-18T20:22:42,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,582 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:22:42,584 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,44109,1731961362211 2024-11-18T20:22:42,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:42,624 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:22:42,625 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,44109,1731961362211 2024-11-18T20:22:42,626 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:22:42,628 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:22:42,628 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:22:42,629 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:22:42,629 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5a964fc427ed,44109,1731961362211 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:22:42,630 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:22:42,630 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:22:42,630 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:22:42,630 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:22:42,630 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5a964fc427ed:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:22:42,630 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,631 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:22:42,631 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:22:42,631 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961392631 2024-11-18T20:22:42,631 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:22:42,631 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:22:42,631 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:22:42,632 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:22:42,632 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:22:42,632 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:22:42,632 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,632 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:22:42,633 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:22:42,634 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:42,634 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:22:42,635 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:22:42,635 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:22:42,635 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:22:42,636 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:22:42,636 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:22:42,636 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961362636,5,FailOnTimeoutGroup] 2024-11-18T20:22:42,644 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961362636,5,FailOnTimeoutGroup] 2024-11-18T20:22:42,644 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,644 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:22:42,644 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,645 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
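The ChoreService records above register background chores (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) that each run on a fixed period given in milliseconds. The snippet below is a small sketch of that fixed-period scheduling pattern using a standard ScheduledExecutorService, with the names and periods copied from the log; it is not the actual ChoreService/ScheduledChore implementation.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Sketch of periodic "chore" scheduling (names and periods from the log above).
    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

        schedule(pool, "LogsCleaner", 600_000);                  // period=600000 ms
        schedule(pool, "HFileCleaner", 600_000);                 // period=600000 ms
        schedule(pool, "ReplicationBarrierCleaner", 43_200_000); // period=43200000 ms
        schedule(pool, "SnapshotCleaner", 1_800_000);            // period=1800000 ms
      }

      static void schedule(ScheduledExecutorService pool, String name, long periodMs) {
        pool.scheduleAtFixedRate(
            () -> System.out.println("running chore " + name),   // a real chore does cleanup work here
            periodMs, periodMs, TimeUnit.MILLISECONDS);
      }
    }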
2024-11-18T20:22:42,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:22:42,656 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(746): ClusterId : e152c6e4-f235-4cba-b812-4b2ab0ab040d 2024-11-18T20:22:42,656 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:22:42,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:22:42,658 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:22:42,658 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa 2024-11-18T20:22:42,667 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:22:42,667 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:22:42,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:22:42,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:22:42,678 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:22:42,679 DEBUG [RS:0;5a964fc427ed:44025 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52857927, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:22:42,692 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5a964fc427ed:44025 2024-11-18T20:22:42,692 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:22:42,692 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:22:42,692 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T20:22:42,693 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,44109,1731961362211 with port=44025, startcode=1731961362334 2024-11-18T20:22:42,693 DEBUG [RS:0;5a964fc427ed:44025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:22:42,696 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51479, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:22:42,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44109 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,44025,1731961362334 2024-11-18T20:22:42,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44109 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,44025,1731961362334 2024-11-18T20:22:42,699 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa 2024-11-18T20:22:42,699 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44059 2024-11-18T20:22:42,699 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:22:42,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:22:42,708 DEBUG [RS:0;5a964fc427ed:44025 {}] zookeeper.ZKUtil(111): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,44025,1731961362334 2024-11-18T20:22:42,708 WARN [RS:0;5a964fc427ed:44025 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T20:22:42,708 INFO [RS:0;5a964fc427ed:44025 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:22:42,708 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334 2024-11-18T20:22:42,708 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,44025,1731961362334] 2024-11-18T20:22:42,714 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:22:42,715 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:22:42,718 INFO [RS:0;5a964fc427ed:44025 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:22:42,718 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,720 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:22:42,721 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:22:42,721 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:22:42,722 DEBUG [RS:0;5a964fc427ed:44025 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:22:42,724 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,724 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,724 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,724 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,724 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,724 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44025,1731961362334-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:22:42,746 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:22:42,746 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44025,1731961362334-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,746 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:42,746 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.Replication(171): 5a964fc427ed,44025,1731961362334 started 2024-11-18T20:22:42,763 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
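The ExecutorService records above start one named thread pool per regionserver event type, each with its own corePoolSize and maxPoolSize. Below is a minimal sketch of building such a named, bounded pool with java.util.concurrent; the pool names and sizes are taken from the log, while the thread-factory and queue choices are assumptions rather than HBase's ExecutorService implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch of the named executor pools listed in the
    // "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." records.
    public class NamedExecutorSketch {
      static ThreadPoolExecutor newPool(String name, int coreSize, int maxSize) {
        AtomicInteger counter = new AtomicInteger();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            coreSize, maxSize, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),                       // unbounded work queue (assumption)
            r -> new Thread(r, name + "-" + counter.incrementAndGet()));
        pool.allowCoreThreadTimeOut(true);                     // let idle worker threads exit
        return pool;
      }

      public static void main(String[] args) {
        // Names and sizes taken from the log records above.
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION-regionserver/5a964fc427ed:0", 1, 1);
        ThreadPoolExecutor snapshotOps = newPool("RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0", 3, 3);

        openRegion.execute(() -> System.out.println("open region task"));
        snapshotOps.execute(() -> System.out.println("snapshot task"));

        openRegion.shutdown();
        snapshotOps.shutdown();
      }
    }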
2024-11-18T20:22:42,763 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,44025,1731961362334, RpcServer on 5a964fc427ed/172.17.0.2:44025, sessionid=0x10150cadcbb0001 2024-11-18T20:22:42,763 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:22:42,763 DEBUG [RS:0;5a964fc427ed:44025 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,44025,1731961362334 2024-11-18T20:22:42,763 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,44025,1731961362334' 2024-11-18T20:22:42,763 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:22:42,764 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:22:42,764 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:22:42,764 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:22:42,764 DEBUG [RS:0;5a964fc427ed:44025 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5a964fc427ed,44025,1731961362334 2024-11-18T20:22:42,764 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,44025,1731961362334' 2024-11-18T20:22:42,764 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:22:42,765 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:22:42,765 DEBUG [RS:0;5a964fc427ed:44025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:22:42,765 INFO [RS:0;5a964fc427ed:44025 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:22:42,765 INFO [RS:0;5a964fc427ed:44025 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:22:42,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:42,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:42,870 INFO [RS:0;5a964fc427ed:44025 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C44025%2C1731961362334, suffix=, logDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334, archiveDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/oldWALs, maxLogs=32 2024-11-18T20:22:42,872 INFO [RS:0;5a964fc427ed:44025 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44025%2C1731961362334.1731961362871 2024-11-18T20:22:42,880 INFO [RS:0;5a964fc427ed:44025 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 2024-11-18T20:22:42,881 DEBUG [RS:0;5a964fc427ed:44025 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45057:45057),(127.0.0.1/127.0.0.1:37123:37123)] 2024-11-18T20:22:43,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:22:43,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:22:43,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:22:43,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:43,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:22:43,088 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:22:43,088 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:43,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:22:43,091 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:22:43,091 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,091 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:43,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:22:43,093 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:22:43,093 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:43,094 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:22:43,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740 2024-11-18T20:22:43,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740 2024-11-18T20:22:43,097 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:22:43,097 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:22:43,098 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
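[Editor's note] The FlushLargeStoresPolicy line just above says that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for hbase:meta, so the per-family lower bound falls back to the region's memstore flush heap size divided by the number of column families; meta has four families (info, ns, rep_barrier, table) and the log reports 16.0 M, which is consistent with the flushSizeLowerBound=16777216 printed a few lines later and implies a 64 MB flush heap size split four ways. A quick sketch of that fallback arithmetic (computeLowerBound is my name, not an HBase method):

    public final class FlushLowerBoundSketch {
        // Fallback used when the per-column-family lower bound is not configured:
        // divide the region's memstore flush heap size evenly across its families.
        static long computeLowerBound(long memStoreFlushHeapSize, int numFamilies) {
            return memStoreFlushHeapSize / numFamilies;
        }

        public static void main(String[] args) {
            long flushHeapSize = 64L * 1024 * 1024;   // 64 MB, implied by "(16.0 M)" in the log
            int families = 4;                         // hbase:meta: info, ns, rep_barrier, table
            System.out.println(computeLowerBound(flushHeapSize, families)); // 16777216 (16 MB)
        }
    }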
2024-11-18T20:22:43,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:22:43,102 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:22:43,102 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741526, jitterRate=-0.05710132420063019}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:22:43,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961363076Initializing all the Stores at 1731961363078 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961363078Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961363079 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961363079Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961363079Cleaning up temporary data from old regions at 1731961363097 (+18 ms)Region opened successfully at 1731961363103 (+6 ms) 2024-11-18T20:22:43,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:22:43,103 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:22:43,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:22:43,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:22:43,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:22:43,104 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:22:43,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961363103Disabling compacts and flushes for region at 1731961363103Disabling writes for close at 1731961363103Writing 
region close event to WAL at 1731961363104 (+1 ms)Closed at 1731961363104 2024-11-18T20:22:43,106 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:22:43,106 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:22:43,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:22:43,107 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:22:43,108 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:22:43,259 DEBUG [5a964fc427ed:44109 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:22:43,259 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5a964fc427ed,44025,1731961362334 2024-11-18T20:22:43,261 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,44025,1731961362334, state=OPENING 2024-11-18T20:22:43,291 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:22:43,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:43,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:22:43,300 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:22:43,301 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,44025,1731961362334}] 2024-11-18T20:22:43,301 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:22:43,301 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:22:43,454 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:22:43,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58123, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:22:43,465 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:22:43,465 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:22:43,468 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C44025%2C1731961362334.meta, suffix=.meta, logDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334, archiveDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/oldWALs, maxLogs=32 2024-11-18T20:22:43,469 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta 2024-11-18T20:22:43,476 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta 2024-11-18T20:22:43,477 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37123:37123),(127.0.0.1/127.0.0.1:45057:45057)] 2024-11-18T20:22:43,478 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:22:43,478 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:22:43,478 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:22:43,478 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
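[Editor's note] The wal.AbstractFSWAL(613) lines (for the regular WAL earlier and for the meta WAL here) report the effective WAL settings for this run: blocksize=256 MB, rollsize=128 MB, maxLogs=32, with per-server log and archive directories. To the best of my knowledge the values come from the configuration keys sketched below, where rollsize = blocksize x the roll multiplier (0.5 is the usual default); treat the mapping as an assumption, not a definitive reference:

    import org.apache.hadoop.conf.Configuration;

    public final class WalConfigSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();

            // WAL block size; the log shows 256 MB for this mini cluster.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            // Roll when a WAL reaches blocksize * multiplier (256 MB * 0.5 = 128 MB).
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Upper bound on un-archived WAL files per server (maxLogs=32 in the log).
            conf.setInt("hbase.regionserver.maxlogs", 32);

            System.out.println(conf.get("hbase.regionserver.hlog.blocksize"));
        }
    }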
2024-11-18T20:22:43,478 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:22:43,479 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:22:43,479 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:22:43,479 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:22:43,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:22:43,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:22:43,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:43,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:22:43,483 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:22:43,483 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:43,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:22:43,485 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:22:43,485 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:22:43,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:22:43,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:22:43,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
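[Editor's note] The repeated compactions.CompactionConfiguration(183) lines dump the compaction tuning applied to every store of region 1588230740: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560 bytes (2 x 10 x 128 MB), and a 604800000 ms major-compaction period with 0.5 jitter. To the best of my knowledge these correspond to the configuration keys below; the mapping is a sketch of the main knobs, not an exhaustive list:

    import org.apache.hadoop.conf.Configuration;

    public final class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();

            conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // minor-compaction ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            // Major compaction every 7 days, with 50% jitter, as in the log.
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
            // Compactions larger than this go to the "large" compaction thread pool.
            conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);

            System.out.println(conf.get("hbase.hstore.compaction.ratio"));
        }
    }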
2024-11-18T20:22:43,488 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:22:43,490 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740 2024-11-18T20:22:43,491 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740 2024-11-18T20:22:43,492 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:22:43,492 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:22:43,493 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:22:43,494 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:22:43,495 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854599, jitterRate=0.0866793841123581}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:22:43,496 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:22:43,496 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961363479Writing region info on filesystem at 1731961363479Initializing all the Stores at 1731961363480 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961363480Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961363480Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961363480Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961363480Cleaning up temporary data from old regions at 1731961363492 (+12 ms)Running coprocessor post-open hooks at 1731961363496 (+4 ms)Region opened successfully at 1731961363496 2024-11-18T20:22:43,497 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961363453 2024-11-18T20:22:43,499 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:22:43,499 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:22:43,500 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,44025,1731961362334 2024-11-18T20:22:43,501 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,44025,1731961362334, state=OPEN 2024-11-18T20:22:43,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:22:43,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:22:43,524 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5a964fc427ed,44025,1731961362334 2024-11-18T20:22:43,525 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:22:43,525 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:22:43,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:22:43,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,44025,1731961362334 in 224 msec 2024-11-18T20:22:43,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:22:43,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 422 msec 2024-11-18T20:22:43,532 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:22:43,532 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:22:43,534 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:22:43,534 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,44025,1731961362334, seqNum=-1] 2024-11-18T20:22:43,534 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:22:43,536 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49209, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:22:43,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 914 msec 2024-11-18T20:22:43,544 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961363544, completionTime=-1 2024-11-18T20:22:43,544 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:22:43,544 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:22:43,546 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:22:43,546 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961423546 2024-11-18T20:22:43,546 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961483546 2024-11-18T20:22:43,546 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T20:22:43,547 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44109,1731961362211-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:43,547 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44109,1731961362211-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:43,547 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44109,1731961362211-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:43,547 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5a964fc427ed:44109, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:22:43,547 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:43,547 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:22:43,549 DEBUG [master/5a964fc427ed:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:22:43,553 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.162sec 2024-11-18T20:22:43,553 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:22:43,553 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:22:43,553 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:22:43,553 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:22:43,553 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:22:43,554 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44109,1731961362211-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:22:43,554 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44109,1731961362211-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:22:43,557 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@211eec9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:22:43,557 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5a964fc427ed,44109,-1 for getting cluster id 2024-11-18T20:22:43,557 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:22:43,557 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:22:43,557 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:22:43,558 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44109,1731961362211-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
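[Editor's note] At this point the master reports "Master has completed initialization 1.162sec" and the HBaseTestingUtil harness declares "Minicluster is up" shortly afterwards. As a hedged sketch only, this is roughly how such a single-master, single-region-server cluster is brought up and torn down from a test; the method names startMiniCluster/shutdownMiniCluster/getConfiguration are the ones I believe this API exposes, but verify them against the exact HBase version in use:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public final class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            try {
                // Starts in-process ZooKeeper, a mini HDFS, one master and one region
                // server, producing startup logging much like the lines above.
                util.startMiniCluster();
                System.out.println(util.getConfiguration().get("hbase.zookeeper.quorum"));
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }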
2024-11-18T20:22:43,559 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e152c6e4-f235-4cba-b812-4b2ab0ab040d' 2024-11-18T20:22:43,560 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:22:43,560 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e152c6e4-f235-4cba-b812-4b2ab0ab040d" 2024-11-18T20:22:43,560 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b3cb8fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:22:43,560 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5a964fc427ed,44109,-1] 2024-11-18T20:22:43,560 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:22:43,561 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:22:43,562 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37168, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:22:43,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6beed7e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:22:43,563 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:22:43,564 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,44025,1731961362334, seqNum=-1] 2024-11-18T20:22:43,564 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:22:43,566 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45354, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:22:43,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5a964fc427ed,44109,1731961362211 2024-11-18T20:22:43,568 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:22:43,570 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:22:43,570 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-18T20:22:43,570 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-18T20:22:43,571 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:22:43,572 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched 
master address is 5a964fc427ed,44109,1731961362211 2024-11-18T20:22:43,572 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@40527c68 2024-11-18T20:22:43,572 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:22:43,573 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:22:43,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44109 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:22:43,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44109 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T20:22:43,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44109 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:22:43,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44109 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T20:22:43,577 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:22:43,577 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:22:43,577 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44109 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-18T20:22:43,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44109 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:22:43,578 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:22:43,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741835_1011 (size=395) 2024-11-18T20:22:43,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741835_1011 (size=395) 2024-11-18T20:22:43,587 INFO 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => abda25881cce04893ba95e29daa6eb15, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa 2024-11-18T20:22:43,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741836_1012 (size=78) 2024-11-18T20:22:43,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741836_1012 (size=78) 2024-11-18T20:22:43,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:22:43,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing abda25881cce04893ba95e29daa6eb15, disabling compactions & flushes 2024-11-18T20:22:43,595 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:22:43,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:22:43,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. after waiting 0 ms 2024-11-18T20:22:43,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:22:43,595 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 
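[Editor's note] The master has just accepted a create-table request for TestLogRolling-testLogRollOnPipelineRestart with a single info family, and TableDescriptorChecker warned that the effective hbase.hregion.max.filesize (786432) and hbase.hregion.memstore.flush.size (8192) are unusually small. In a log-rolling test that is deliberate, since tiny thresholds force frequent flushes and rolls. Below is a hedged sketch of issuing an equivalent request through the public client API, with the small limits set on the table descriptor rather than in site configuration (one of several ways a test could arrange this; the warning accepts either source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .setMaxFileSize(768 * 1024)      // 786432: tiny split threshold, as warned above
                    .setMemStoreFlushSize(8 * 1024)  // 8192: tiny flush size to force frequent flushes
                    .build();
                admin.createTable(td);
            }
        }
    }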
2024-11-18T20:22:43,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for abda25881cce04893ba95e29daa6eb15: Waiting for close lock at 1731961363595Disabling compacts and flushes for region at 1731961363595Disabling writes for close at 1731961363595Writing region close event to WAL at 1731961363595Closed at 1731961363595 2024-11-18T20:22:43,597 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:22:43,597 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731961363597"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961363597"}]},"ts":"1731961363597"} 2024-11-18T20:22:43,599 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-18T20:22:43,600 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:22:43,601 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961363600"}]},"ts":"1731961363600"} 2024-11-18T20:22:43,602 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-18T20:22:43,603 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=abda25881cce04893ba95e29daa6eb15, ASSIGN}] 2024-11-18T20:22:43,604 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=abda25881cce04893ba95e29daa6eb15, ASSIGN 2024-11-18T20:22:43,605 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=abda25881cce04893ba95e29daa6eb15, ASSIGN; state=OFFLINE, location=5a964fc427ed,44025,1731961362334; forceNewPlan=false, retain=false 2024-11-18T20:22:43,756 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=abda25881cce04893ba95e29daa6eb15, regionState=OPENING, regionLocation=5a964fc427ed,44025,1731961362334 2024-11-18T20:22:43,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=abda25881cce04893ba95e29daa6eb15, ASSIGN because future has completed 2024-11-18T20:22:43,760 INFO [PEWorker-1 {}] 
2024-11-18T20:22:43,760 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure abda25881cce04893ba95e29daa6eb15, server=5a964fc427ed,44025,1731961362334}]
2024-11-18T20:22:43,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:22:43,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:22:43,919 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.
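The two warnings above (and their later repeats) come from WAL lease recovery on the Close-WAL-Writer thread: RecoverLeaseFSUtils asks the NameNode to recover the old writer's lease and then polls whether the file is closed, invoking the HDFS isFileClosed check through reflection (visible as Method.invoke in the trace), which is why the underlying IOException("Filesystem closed") from DFSClient.checkOpen arrives wrapped in an InvocationTargetException. The sketch below is only a minimal illustration of that recover-then-poll pattern, not the RecoverLeaseFSUtils implementation; the class and the waitForClosed helper are hypothetical names:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {  // hypothetical, illustration only
      // Ask the NameNode to recover the lease on an old WAL, then poll isFileClosed()
      // until it reports true. If the DFS client behind the path has already been
      // closed, every poll throws IOException("Filesystem closed"), which is the
      // wrapped cause in the warnings above.
      public static void waitForClosed(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean closed = dfs.recoverLease(wal);
        while (!closed) {
          Thread.sleep(1000); // the log shows one retry roughly every second
          closed = dfs.isFileClosed(wal);
        }
      }
    }

In this run the loop apparently keeps failing because the WAL paths point at the earlier mini-cluster NameNode on port 33409, whose filesystem handle has already been shut down, while the newly created table lives on port 44059.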
2024-11-18T20:22:43,919 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => abda25881cce04893ba95e29daa6eb15, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.', STARTKEY => '', ENDKEY => ''}
2024-11-18T20:22:43,919 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,919 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-18T20:22:43,919 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,919 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,921 INFO [StoreOpener-abda25881cce04893ba95e29daa6eb15-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,922 INFO [StoreOpener-abda25881cce04893ba95e29daa6eb15-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region abda25881cce04893ba95e29daa6eb15 columnFamilyName info
2024-11-18T20:22:43,922 DEBUG [StoreOpener-abda25881cce04893ba95e29daa6eb15-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-18T20:22:43,923 INFO [StoreOpener-abda25881cce04893ba95e29daa6eb15-1 {}] regionserver.HStore(327): Store=abda25881cce04893ba95e29daa6eb15/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-18T20:22:43,923 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,925 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,928 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,929 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,929 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,930 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,940 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-18T20:22:43,940 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened abda25881cce04893ba95e29daa6eb15; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775438, jitterRate=-0.013980686664581299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-18T20:22:43,940 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for abda25881cce04893ba95e29daa6eb15
2024-11-18T20:22:43,941 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for abda25881cce04893ba95e29daa6eb15: Running coprocessor pre-open hook at 1731961363919Writing region info on filesystem at 1731961363919Initializing all the Stores at 1731961363920 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961363920Cleaning up temporary data from old regions at 1731961363929 (+9 ms)Running coprocessor post-open hooks at 1731961363940 (+11 ms)Region opened successfully at 1731961363941 (+1 ms)
2024-11-18T20:22:43,945 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15., pid=6, masterSystemTime=1731961363913
2024-11-18T20:22:43,949 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.
2024-11-18T20:22:43,949 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.
2024-11-18T20:22:43,950 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=abda25881cce04893ba95e29daa6eb15, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,44025,1731961362334
2024-11-18T20:22:43,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure abda25881cce04893ba95e29daa6eb15, server=5a964fc427ed,44025,1731961362334 because future has completed
2024-11-18T20:22:43,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-18T20:22:43,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure abda25881cce04893ba95e29daa6eb15, server=5a964fc427ed,44025,1731961362334 in 196 msec
2024-11-18T20:22:43,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-18T20:22:43,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=abda25881cce04893ba95e29daa6eb15, ASSIGN in 357 msec
2024-11-18T20:22:43,968 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-18T20:22:43,968 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961363968"}]},"ts":"1731961363968"}
2024-11-18T20:22:43,972 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta
2024-11-18T20:22:43,974 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION
2024-11-18T20:22:43,978 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 400 msec
2024-11-18T20:22:45,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-18T20:22:45,393 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-18T20:22:45,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-18T20:22:45,394 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-18T20:22:45,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-18T20:22:45,394 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-18T20:22:48,985 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-18T20:22:49,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[The same impl.FsDatasetImpl(779) warning is logged ten times in total between 20:22:49,020 and 20:22:49,030.]
2024-11-18T20:22:49,035 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-18T20:22:49,035 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
[The pair of util.RecoverLeaseFSUtils(258) warnings shown above at 20:22:43,769 and 20:22:43,818, one for each of the two old WALs, repeats with an identical stack trace at roughly one-second intervals from 20:22:44,770 through 20:22:52,825.]
2024-11-18T20:22:53,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44109 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-18T20:22:53,636 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-18T20:22:53,636 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-18T20:22:53,640 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-18T20:22:53,640 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15.
2024-11-18T20:22:53,644 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15., hostname=5a964fc427ed,44025,1731961362334, seqNum=2]
[Two further pairs of the util.RecoverLeaseFSUtils(258) warnings follow at 20:22:53,777/20:22:53,826 and 20:22:54,778/20:22:54,827; this excerpt ends inside the last stack trace.]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:55,648 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 2024-11-18T20:22:55,649 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:55,649 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:55,649 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:35743,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:55,649 WARN [DataStreamer for file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta block BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35743,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK], DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35743,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]) is bad. 
2024-11-18T20:22:55,649 WARN [DataStreamer for file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 block BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35743,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK], DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35743,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]) is bad. 2024-11-18T20:22:55,650 WARN [DataStreamer for file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 block BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK], DatanodeInfoWithStorage[127.0.0.1:35743,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35743,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]) is bad. 2024-11-18T20:22:55,650 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:51360 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51360 dst: /127.0.0.1:35743 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:55,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1574681586_22 at /127.0.0.1:54168 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54168 dst: /127.0.0.1:35369 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:55,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:54210 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54210 dst: /127.0.0.1:35369 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:55,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1574681586_22 at /127.0.0.1:51332 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51332 dst: /127.0.0.1:35743 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:55,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:54220 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54220 dst: /127.0.0.1:35369 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:55,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:51350 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51350 dst: /127.0.0.1:35743 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:55,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5921b3be{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:55,677 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1118a265{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:55,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:55,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8825f29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:55,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b27dfb0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:55,679 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:55,679 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:22:55,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:55,679 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-286708197-172.17.0.2-1731961360674 (Datanode Uuid 4b0d931c-1d02-4523-beb0-94685c199429) service to localhost/127.0.0.1:44059 2024-11-18T20:22:55,679 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data3/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:55,679 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data4/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:55,680 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:55,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:55,694 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:55,695 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:55,695 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:55,695 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:22:55,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e979747{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:55,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718ea2f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:55,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:55,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35808fda{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir/jetty-localhost-40791-hadoop-hdfs-3_4_1-tests_jar-_-any-8732872922127159425/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:55,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57d5f4b3{HTTP/1.1, (http/1.1)}{localhost:40791} 2024-11-18T20:22:55,789 INFO [Time-limited test {}] server.Server(415): Started @167281ms 2024-11-18T20:22:55,790 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:22:55,806 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:55,806 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:55,806 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:55,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:34420 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34420 dst: /127.0.0.1:35369 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:55,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:34422 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34422 dst: /127.0.0.1:35369 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:22:55,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1574681586_22 at /127.0.0.1:34436 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34436 dst: /127.0.0.1:35369 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:22:55,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c3df9c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:55,814 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@623a52f4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:22:55,814 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:22:55,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a928dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:22:55,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7353ad08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,STOPPED} 2024-11-18T20:22:55,816 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:22:55,816 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-286708197-172.17.0.2-1731961360674 (Datanode Uuid 9967eb4e-3039-43d6-92a8-831875a4513a) service to localhost/127.0.0.1:44059 2024-11-18T20:22:55,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data1/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:55,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data2/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:22:55,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:22:55,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:22:55,817 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:22:55,826 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:22:55,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:55,829 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:22:55,830 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:22:55,830 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:22:55,830 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:22:55,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4022a798{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:22:55,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c54cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:22:55,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e29ac6f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir/jetty-localhost-37699-hadoop-hdfs-3_4_1-tests_jar-_-any-2739351914180807832/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:22:55,928 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64685bd7{HTTP/1.1, (http/1.1)}{localhost:37699} 2024-11-18T20:22:55,928 INFO [Time-limited test {}] server.Server(415): Started @167420ms 2024-11-18T20:22:55,930 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:22:56,058 WARN [Thread-1332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:22:56,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x37d742087ec44296 with lease ID 0xd6797ffc66fbcf6: from storage DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9 node DatanodeRegistration(127.0.0.1:45521, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=37925, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:56,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x37d742087ec44296 with lease ID 0xd6797ffc66fbcf6: from storage DS-b7d9f6fe-2a27-44e1-8e2e-528f765c3628 node DatanodeRegistration(127.0.0.1:45521, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=37925, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:56,232 WARN [Thread-1353 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:22:56,234 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x696236840abfc790 with lease ID 0xd6797ffc66fbcf7: from storage DS-72442cc8-041e-491f-88ed-391a71633f17 node DatanodeRegistration(127.0.0.1:45659, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=36647, infoSecurePort=0, ipcPort=46379, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:56,234 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x696236840abfc790 with lease ID 0xd6797ffc66fbcf7: from storage DS-18c31870-3a2c-48b5-ad95-07e09b10b23d node DatanodeRegistration(127.0.0.1:45659, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=36647, infoSecurePort=0, ipcPort=46379, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:22:56,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:56,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:56,977 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-18T20:22:56,982 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-18T20:22:56,985 ERROR [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:56,985 WARN [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:56,985 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C44025%2C1731961362334:(num 1731961362871) roll requested 2024-11-18T20:22:56,986 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44025%2C1731961362334.1731961376986 2024-11-18T20:22:56,994 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 newFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 2024-11-18T20:22:56,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:56,995 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:56,995 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:56,995 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:56,995 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:22:56,996 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 2024-11-18T20:22:56,996 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:22:56,996 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:22:56,996 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 2024-11-18T20:22:56,997 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37925:37925),(127.0.0.1/127.0.0.1:36647:36647)] 2024-11-18T20:22:56,997 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 is not closed yet, will try archiving it next time 2024-11-18T20:22:56,997 WARN [IPC Server handler 2 on default port 44059 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-18T20:22:56,998 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 after 2ms 2024-11-18T20:22:57,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:57,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:58,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:58,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:22:59,001 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-18T20:22:59,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:22:59,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:00,064 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:23:00,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:00,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:00,999 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 after 4003ms 2024-11-18T20:23:01,005 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:45659,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:01,005 WARN [DataStreamer for file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 block BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK], DatanodeInfoWithStorage[127.0.0.1:45659,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45659,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]) is bad. 
2024-11-18T20:23:01,006 WARN [PacketResponder: BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45659] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:23:01,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:55292 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55292 dst: /127.0.0.1:45521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:23:01,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:51332 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51332 dst: /127.0.0.1:45659 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:23:01,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e29ac6f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:01,055 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64685bd7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:23:01,055 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:23:01,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c54cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:23:01,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4022a798{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,STOPPED} 2024-11-18T20:23:01,058 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:23:01,058 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:23:01,059 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-286708197-172.17.0.2-1731961360674 (Datanode Uuid 9967eb4e-3039-43d6-92a8-831875a4513a) service to localhost/127.0.0.1:44059 2024-11-18T20:23:01,059 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:23:01,059 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data1/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:01,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data2/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:01,060 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:23:01,068 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:23:01,072 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:23:01,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:23:01,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:23:01,074 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:23:01,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21d94b42{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:23:01,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24dbb8ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:23:01,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f748d96{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir/jetty-localhost-45049-hadoop-hdfs-3_4_1-tests_jar-_-any-6520537264909697316/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:01,168 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c9115f6{HTTP/1.1, (http/1.1)}{localhost:45049} 2024-11-18T20:23:01,168 INFO [Time-limited test {}] server.Server(415): Started @172659ms 2024-11-18T20:23:01,169 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:23:01,190 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:01,190 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-424716260_22 at /127.0.0.1:55308 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55308 dst: /127.0.0.1:45521 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:23:01,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35808fda{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:01,195 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57d5f4b3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:23:01,195 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:23:01,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718ea2f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:23:01,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e979747{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,STOPPED} 2024-11-18T20:23:01,196 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:23:01,196 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-286708197-172.17.0.2-1731961360674 (Datanode Uuid 4b0d931c-1d02-4523-beb0-94685c199429) service to localhost/127.0.0.1:44059 2024-11-18T20:23:01,196 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:23:01,196 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:23:01,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data3/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:01,197 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data4/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:01,197 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:23:01,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:23:01,212 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:23:01,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:23:01,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:23:01,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:23:01,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e79e191{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:23:01,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e60361d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:23:01,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@56b0b8ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/java.io.tmpdir/jetty-localhost-39943-hadoop-hdfs-3_4_1-tests_jar-_-any-7878977370653612048/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:01,311 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16fecb8d{HTTP/1.1, (http/1.1)}{localhost:39943} 2024-11-18T20:23:01,312 INFO [Time-limited test {}] server.Server(415): Started @172803ms 2024-11-18T20:23:01,313 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:23:01,443 WARN [Thread-1406 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:23:01,445 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe5bfe62fff87c127 with lease ID 0xd6797ffc66fbcf8: from storage DS-72442cc8-041e-491f-88ed-391a71633f17 node DatanodeRegistration(127.0.0.1:45389, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=36999, infoSecurePort=0, ipcPort=34883, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:23:01,445 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe5bfe62fff87c127 with lease ID 0xd6797ffc66fbcf8: from storage DS-18c31870-3a2c-48b5-ad95-07e09b10b23d node DatanodeRegistration(127.0.0.1:45389, datanodeUuid=9967eb4e-3039-43d6-92a8-831875a4513a, infoPort=36999, infoSecurePort=0, ipcPort=34883, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:23:01,617 WARN [Thread-1427 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:23:01,619 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea420e00337e745e with lease ID 0xd6797ffc66fbcf9: from storage DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9 node DatanodeRegistration(127.0.0.1:32867, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=40547, infoSecurePort=0, ipcPort=40447, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:23:01,619 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea420e00337e745e with lease ID 0xd6797ffc66fbcf9: from storage DS-b7d9f6fe-2a27-44e1-8e2e-528f765c3628 node DatanodeRegistration(127.0.0.1:32867, datanodeUuid=4b0d931c-1d02-4523-beb0-94685c199429, infoPort=40547, infoSecurePort=0, ipcPort=40447, storageInfo=lv=-57;cid=testClusterID;nsid=629368935;c=1731961360674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:23:01,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:01,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:02,331 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-18T20:23:02,333 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-18T20:23:02,335 ERROR [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:02,335 WARN [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:23:02,335 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C44025%2C1731961362334:(num 1731961376986) roll requested 2024-11-18T20:23:02,335 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44025%2C1731961362334.1731961382335 2024-11-18T20:23:02,341 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 newFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 2024-11-18T20:23:02,341 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:02,341 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:02,342 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:02,342 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:02,342 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:02,342 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 2024-11-18T20:23:02,342 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:02,342 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:23:02,342 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 2024-11-18T20:23:02,343 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40547:40547)] 2024-11-18T20:23:02,343 WARN [IPC Server handler 3 on default port 44059 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-18T20:23:02,343 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 is not closed yet, will try archiving it next time 2024-11-18T20:23:02,343 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 after 1ms 2024-11-18T20:23:02,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:02,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:03,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:03,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:04,345 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:04,355 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 newFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:04,356 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:04,356 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:04,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:04,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:04,357 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:04,357 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:04,358 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40547:40547)] 2024-11-18T20:23:04,358 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 is not closed yet, will try archiving it next time 2024-11-18T20:23:04,358 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 is not closed yet, will try archiving it next time 2024-11-18T20:23:04,359 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 2024-11-18T20:23:04,359 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 2024-11-18T20:23:04,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741838_1019 (size=1264) 2024-11-18T20:23:04,360 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): 
Recovered lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871 after 0ms
2024-11-18T20:23:04,360 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871
2024-11-18T20:23:04,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741838_1019 (size=1264)
2024-11-18T20:23:04,360 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 is not closed yet, will try archiving it next time
2024-11-18T20:23:04,372 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731961363941/Put/vlen=218/seqid=0]
2024-11-18T20:23:04,373 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731961373646/Put/vlen=1045/seqid=0]
2024-11-18T20:23:04,373 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961362871
2024-11-18T20:23:04,373 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986
2024-11-18T20:23:04,373 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986
2024-11-18T20:23:04,374 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 after 1ms
2024-11-18T20:23:04,374 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986
2024-11-18T20:23:04,377 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731961376984/Put/vlen=1045/seqid=0]
2024-11-18T20:23:04,377 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731961379002/Put/vlen=1045/seqid=0]
2024-11-18T20:23:04,378 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986
2024-11-18T20:23:04,378 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335
2024-11-18T20:23:04,378 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file
hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 2024-11-18T20:23:04,378 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 after 0ms 2024-11-18T20:23:04,378 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961382335 2024-11-18T20:23:04,381 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731961382334/Put/vlen=1045/seqid=0] 2024-11-18T20:23:04,381 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:04,382 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:04,382 WARN [IPC Server handler 1 on default port 44059 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-18T20:23:04,382 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 after 0ms 2024-11-18T20:23:04,624 WARN [ResponseProcessor for block BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:04,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1574681586_22 at /127.0.0.1:46670 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45389:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46670 dst: /127.0.0.1:45389 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45389 remote=/127.0.0.1:46670]. Total timeout mills is 60000, 59731 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:23:04,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1574681586_22 at /127.0.0.1:59904 [Receiving block BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59904 dst: /127.0.0.1:32867 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
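Alongside the read-back of rows #3 to #7, the Close-WAL-Writer-0 thread keeps retrying lease recovery. The recoveries aimed at port 33409 (what appears to be an earlier mini-cluster's WAL and meta WAL) keep failing with "Filesystem closed" because the DFSClient behind that FileSystem has already been shut down, while recovery of the live cluster's files on port 44059 succeeds on attempt=1 after roughly four seconds later in the log. The recover-then-poll pattern RecoverLeaseFSUtils is applying can be sketched against the plain HDFS client API as follows; this is illustrative only, not the HBase implementation, and the path and timeout are hypothetical.

// Illustrative sketch of the recover-then-poll lease recovery pattern seen in
// the RecoverLeaseFSUtils entries: issue recoverLease(), then poll
// isFileClosed() with backoff until the block recovery completes.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  static boolean recoverWalLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean closed = dfs.recoverLease(wal);   // attempt=0: ask the NameNode to release the old writer's lease
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                    // back off before the next attempt
      closed = dfs.isFileClosed(wal);         // true once block recovery has finished
      if (!closed) {
        closed = dfs.recoverLease(wal);       // attempt=N: re-issue recovery
      }
    }
    return closed;
  }
}

When isFileClosed() is invoked through a FileSystem whose client has already been closed, it throws the "Filesystem closed" IOException seen in the repeated warnings, so those particular retries can never succeed.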
2024-11-18T20:23:04,624 WARN [DataStreamer for file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 block BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45389,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK], DatanodeInfoWithStorage[127.0.0.1:32867,DS-446fa850-72d8-4b21-993a-b79bb5f9b7c9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45389,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]) is bad. 2024-11-18T20:23:04,625 WARN [DataStreamer for file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 block BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:04,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741839_1022 (size=85) 2024-11-18T20:23:04,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:04,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:05,445 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:23:05,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:05,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:06,345 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961376986 after 4003ms 2024-11-18T20:23:06,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:06,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:07,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:07,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:08,384 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 after 4002ms 2024-11-18T20:23:08,384 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:08,393 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:08,393 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing abda25881cce04893ba95e29daa6eb15 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-18T20:23:08,394 ERROR [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:08,394 WARN [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:08,395 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C44025%2C1731961362334:(num 1731961384345) roll requested 2024-11-18T20:23:08,395 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44025%2C1731961362334.1731961388395 2024-11-18T20:23:08,402 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 newFile=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961388395 2024-11-18T20:23:08,402 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,402 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,403 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,403 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,403 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,403 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961388395 2024-11-18T20:23:08,403 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:08,403 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40547:40547),(127.0.0.1/127.0.0.1:36999:36999)] 2024-11-18T20:23:08,403 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-286708197-172.17.0.2-1731961360674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:23:08,404 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 is not closed yet, will try archiving it next time 2024-11-18T20:23:08,404 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:08,404 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 after 0ms 2024-11-18T20:23:08,404 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.1731961384345 to hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/oldWALs/5a964fc427ed%2C44025%2C1731961362334.1731961384345 2024-11-18T20:23:08,418 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15/.tmp/info/2e3927f55a48442d82f0ea785c9014f3 is 1080, key is row1002/info:/1731961373646/Put/seqid=0 2024-11-18T20:23:08,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741841_1024 (size=9270) 2024-11-18T20:23:08,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741841_1024 (size=9270) 2024-11-18T20:23:08,423 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15/.tmp/info/2e3927f55a48442d82f0ea785c9014f3 2024-11-18T20:23:08,429 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15/.tmp/info/2e3927f55a48442d82f0ea785c9014f3 as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15/info/2e3927f55a48442d82f0ea785c9014f3 2024-11-18T20:23:08,435 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15/info/2e3927f55a48442d82f0ea785c9014f3, entries=4, sequenceid=8, filesize=9.1 K 2024-11-18T20:23:08,436 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for abda25881cce04893ba95e29daa6eb15 in 43ms, sequenceid=8, compaction requested=false 
2024-11-18T20:23:08,436 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for abda25881cce04893ba95e29daa6eb15: 2024-11-18T20:23:08,436 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-18T20:23:08,437 ERROR [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:08,437 WARN [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa-prefix:5a964fc427ed,44025,1731961362334.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:23:08,437 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C44025%2C1731961362334.meta:.meta(num 1731961363469) roll requested 2024-11-18T20:23:08,437 INFO [regionserver/5a964fc427ed:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44025%2C1731961362334.meta.1731961388437.meta 2024-11-18T20:23:08,442 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,442 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,442 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,442 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,442 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961388437.meta 2024-11-18T20:23:08,443 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:08,443 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:23:08,443 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta 2024-11-18T20:23:08,443 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40547:40547)] 2024-11-18T20:23:08,443 DEBUG [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta is not closed yet, will try archiving it next time 2024-11-18T20:23:08,443 WARN [IPC Server handler 3 on default port 44059 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-18T20:23:08,443 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta after 0ms 2024-11-18T20:23:08,459 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/info/d11130bb68cc4d8ab44c97851bb59e58 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15./info:regioninfo/1731961363950/Put/seqid=0 2024-11-18T20:23:08,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741843_1027 (size=7125) 2024-11-18T20:23:08,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741843_1027 (size=7125) 2024-11-18T20:23:08,464 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/info/d11130bb68cc4d8ab44c97851bb59e58 2024-11-18T20:23:08,482 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/ns/1559ff19431245a5ae7edde7c58c4399 is 43, key is default/ns:d/1731961363536/Put/seqid=0 2024-11-18T20:23:08,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741844_1028 (size=5153) 2024-11-18T20:23:08,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741844_1028 (size=5153) 2024-11-18T20:23:08,487 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/ns/1559ff19431245a5ae7edde7c58c4399 2024-11-18T20:23:08,508 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/table/d4201899824d48b7a4857e913180d771 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731961363968/Put/seqid=0 2024-11-18T20:23:08,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741845_1029 (size=5438) 2024-11-18T20:23:08,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741845_1029 (size=5438) 2024-11-18T20:23:08,513 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/table/d4201899824d48b7a4857e913180d771 2024-11-18T20:23:08,519 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/info/d11130bb68cc4d8ab44c97851bb59e58 as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/info/d11130bb68cc4d8ab44c97851bb59e58 2024-11-18T20:23:08,525 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/info/d11130bb68cc4d8ab44c97851bb59e58, entries=10, sequenceid=11, filesize=7.0 K 2024-11-18T20:23:08,526 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/ns/1559ff19431245a5ae7edde7c58c4399 as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/ns/1559ff19431245a5ae7edde7c58c4399 2024-11-18T20:23:08,531 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/ns/1559ff19431245a5ae7edde7c58c4399, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:23:08,532 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/.tmp/table/d4201899824d48b7a4857e913180d771 as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/table/d4201899824d48b7a4857e913180d771 2024-11-18T20:23:08,538 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/table/d4201899824d48b7a4857e913180d771, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T20:23:08,539 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 103ms, sequenceid=11, compaction requested=false 2024-11-18T20:23:08,540 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T20:23:08,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:23:08,545 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:23:08,545 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:23:08,545 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:23:08,545 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:23:08,545 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T20:23:08,545 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:23:08,545 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2083436526, stopped=false 2024-11-18T20:23:08,546 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5a964fc427ed,44109,1731961362211 2024-11-18T20:23:08,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:23:08,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:23:08,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:08,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:08,573 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:23:08,573 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:23:08,573 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:23:08,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:23:08,573 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:23:08,573 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,44025,1731961362334' ***** 2024-11-18T20:23:08,573 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:23:08,573 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:23:08,573 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:23:08,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(3091): Received CLOSE for abda25881cce04893ba95e29daa6eb15 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,44025,1731961362334 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5a964fc427ed:44025. 2024-11-18T20:23:08,574 DEBUG [RS:0;5a964fc427ed:44025 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:23:08,574 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing abda25881cce04893ba95e29daa6eb15, disabling compactions & flushes 2024-11-18T20:23:08,574 DEBUG [RS:0;5a964fc427ed:44025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:23:08,574 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:23:08,574 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:23:08,574 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. after waiting 0 ms 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:23:08,574 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:23:08,574 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:23:08,575 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:23:08,575 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1325): Online Regions={abda25881cce04893ba95e29daa6eb15=TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:23:08,575 DEBUG [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, abda25881cce04893ba95e29daa6eb15 2024-11-18T20:23:08,575 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:23:08,575 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:23:08,575 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:23:08,575 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:23:08,575 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:23:08,578 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:23:08,578 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/data/default/TestLogRolling-testLogRollOnPipelineRestart/abda25881cce04893ba95e29daa6eb15/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-18T20:23:08,579 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:23:08,579 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:23:08,579 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:23:08,579 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961388575Running coprocessor pre-close hooks at 1731961388575Disabling compacts and flushes for region at 1731961388575Disabling writes for close at 1731961388575Writing region close event to WAL at 1731961388576 (+1 ms)Running coprocessor post-close hooks at 1731961388579 (+3 ms)Closed at 1731961388579 2024-11-18T20:23:08,579 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for abda25881cce04893ba95e29daa6eb15: Waiting for close lock at 1731961388574Running coprocessor pre-close hooks at 1731961388574Disabling compacts and flushes for region at 1731961388574Disabling writes for close at 1731961388574Writing region close event to WAL at 1731961388575 (+1 ms)Running coprocessor post-close hooks at 1731961388579 (+4 ms)Closed at 1731961388579 2024-11-18T20:23:08,579 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:23:08,579 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731961363574.abda25881cce04893ba95e29daa6eb15. 2024-11-18T20:23:08,724 INFO [regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:23:08,724 INFO [regionserver/5a964fc427ed:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:23:08,726 INFO [regionserver/5a964fc427ed:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:23:08,775 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,44025,1731961362334; all regions closed. 
2024-11-18T20:23:08,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,776 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,776 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,776 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,776 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:08,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741842_1025 (size=825) 2024-11-18T20:23:08,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741842_1025 (size=825) 2024-11-18T20:23:08,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:08,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:09,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:09,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:10,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:10,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:11,619 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:23:11,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:11,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:12,191 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-18T20:23:12,444 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta after 4001ms 2024-11-18T20:23:12,445 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/WALs/5a964fc427ed,44025,1731961362334/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta to hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/oldWALs/5a964fc427ed%2C44025%2C1731961362334.meta.1731961363469.meta 2024-11-18T20:23:12,447 DEBUG [RS:0;5a964fc427ed:44025 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/oldWALs 2024-11-18T20:23:12,448 INFO [RS:0;5a964fc427ed:44025 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C44025%2C1731961362334.meta:.meta(num 1731961388437) 2024-11-18T20:23:12,448 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,448 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,448 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,448 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,448 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741840_1023 (size=1162) 2024-11-18T20:23:12,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741840_1023 (size=1162) 2024-11-18T20:23:12,465 DEBUG [RS:0;5a964fc427ed:44025 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/oldWALs 2024-11-18T20:23:12,465 INFO [RS:0;5a964fc427ed:44025 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C44025%2C1731961362334:(num 1731961388395) 2024-11-18T20:23:12,465 DEBUG [RS:0;5a964fc427ed:44025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:23:12,465 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:23:12,465 INFO [RS:0;5a964fc427ed:44025 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:23:12,466 INFO [RS:0;5a964fc427ed:44025 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:23:12,466 INFO [RS:0;5a964fc427ed:44025 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:23:12,466 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:23:12,466 INFO [RS:0;5a964fc427ed:44025 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44025 2024-11-18T20:23:12,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,44025,1731961362334 2024-11-18T20:23:12,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:23:12,495 INFO [RS:0;5a964fc427ed:44025 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:23:12,506 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,44025,1731961362334] 2024-11-18T20:23:12,514 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,44025,1731961362334 already deleted, retry=false 2024-11-18T20:23:12,514 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,44025,1731961362334 expired; onlineServers=0 2024-11-18T20:23:12,514 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5a964fc427ed,44109,1731961362211' ***** 2024-11-18T20:23:12,514 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:23:12,514 INFO [M:0;5a964fc427ed:44109 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:23:12,514 INFO [M:0;5a964fc427ed:44109 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:23:12,515 DEBUG [M:0;5a964fc427ed:44109 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:23:12,515 DEBUG [M:0;5a964fc427ed:44109 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:23:12,515 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:23:12,515 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961362636 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961362636,5,FailOnTimeoutGroup] 2024-11-18T20:23:12,515 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961362636 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961362636,5,FailOnTimeoutGroup] 2024-11-18T20:23:12,515 INFO [M:0;5a964fc427ed:44109 {}] hbase.ChoreService(370): Chore service for: master/5a964fc427ed:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:23:12,516 INFO [M:0;5a964fc427ed:44109 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:23:12,516 DEBUG [M:0;5a964fc427ed:44109 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:23:12,516 INFO [M:0;5a964fc427ed:44109 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:23:12,516 INFO [M:0;5a964fc427ed:44109 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:23:12,516 INFO [M:0;5a964fc427ed:44109 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:23:12,516 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:23:12,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:23:12,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:12,522 DEBUG [M:0;5a964fc427ed:44109 {}] zookeeper.ZKUtil(347): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:23:12,523 WARN [M:0;5a964fc427ed:44109 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:23:12,523 INFO [M:0;5a964fc427ed:44109 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/.lastflushedseqids 2024-11-18T20:23:12,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741846_1030 (size=130) 2024-11-18T20:23:12,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741846_1030 (size=130) 2024-11-18T20:23:12,530 INFO [M:0;5a964fc427ed:44109 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:23:12,530 INFO [M:0;5a964fc427ed:44109 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:23:12,530 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:23:12,531 INFO [M:0;5a964fc427ed:44109 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:23:12,531 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:23:12,531 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:23:12,531 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:23:12,531 INFO [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-18T20:23:12,531 ERROR [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData-prefix:5a964fc427ed,44109,1731961362211 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:12,531 WARN [FSHLog-0-hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData-prefix:5a964fc427ed,44109,1731961362211 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:23:12,531 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 5a964fc427ed%2C44109%2C1731961362211:(num 1731961362461) roll requested 2024-11-18T20:23:12,532 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44109%2C1731961362211.1731961392532 2024-11-18T20:23:12,537 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,537 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,537 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,537 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,537 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,538 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961392532 2024-11-18T20:23:12,538 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:23:12,538 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35369,DS-72442cc8-041e-491f-88ed-391a71633f17,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:23:12,538 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 2024-11-18T20:23:12,538 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40547:40547),(127.0.0.1/127.0.0.1:36999:36999)] 2024-11-18T20:23:12,538 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 is not closed yet, will try archiving it next time 2024-11-18T20:23:12,539 WARN [IPC Server handler 3 on default port 44059 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-18T20:23:12,539 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 after 1ms 2024-11-18T20:23:12,553 DEBUG [M:0;5a964fc427ed:44109 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b078db365e2a419d93315898260628f6 is 82, key is hbase:meta,,1/info:regioninfo/1731961363500/Put/seqid=0 2024-11-18T20:23:12,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741848_1033 (size=5672) 2024-11-18T20:23:12,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741848_1033 (size=5672) 2024-11-18T20:23:12,558 INFO [M:0;5a964fc427ed:44109 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b078db365e2a419d93315898260628f6 2024-11-18T20:23:12,578 DEBUG [M:0;5a964fc427ed:44109 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1dba4df897c4f14951fa9f6b7092181 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961363977/Put/seqid=0 2024-11-18T20:23:12,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741849_1034 (size=6119) 2024-11-18T20:23:12,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741849_1034 (size=6119) 2024-11-18T20:23:12,584 INFO [M:0;5a964fc427ed:44109 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1dba4df897c4f14951fa9f6b7092181 2024-11-18T20:23:12,602 DEBUG [M:0;5a964fc427ed:44109 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2c76ce3f001745ae96ab53cc0136f8de is 69, key is 5a964fc427ed,44025,1731961362334/rs:state/1731961362697/Put/seqid=0 2024-11-18T20:23:12,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:23:12,606 INFO [RS:0;5a964fc427ed:44025 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:23:12,606 INFO [RS:0;5a964fc427ed:44025 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,44025,1731961362334; zookeeper connection closed. 2024-11-18T20:23:12,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44025-0x10150cadcbb0001, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:23:12,606 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b6baad8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b6baad8 2024-11-18T20:23:12,606 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:23:12,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741850_1035 (size=5156) 2024-11-18T20:23:12,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741850_1035 (size=5156) 2024-11-18T20:23:12,608 INFO [M:0;5a964fc427ed:44109 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2c76ce3f001745ae96ab53cc0136f8de 2024-11-18T20:23:12,635 DEBUG [M:0;5a964fc427ed:44109 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7019f7e4d6b4a399a5d665256970a68 is 52, key is load_balancer_on/state:d/1731961363569/Put/seqid=0 2024-11-18T20:23:12,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741851_1036 (size=5056) 2024-11-18T20:23:12,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741851_1036 (size=5056) 2024-11-18T20:23:12,641 INFO [M:0;5a964fc427ed:44109 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7019f7e4d6b4a399a5d665256970a68 2024-11-18T20:23:12,647 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b078db365e2a419d93315898260628f6 as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b078db365e2a419d93315898260628f6 2024-11-18T20:23:12,653 INFO [M:0;5a964fc427ed:44109 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b078db365e2a419d93315898260628f6, entries=8, sequenceid=56, filesize=5.5 K 2024-11-18T20:23:12,654 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1dba4df897c4f14951fa9f6b7092181 as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b1dba4df897c4f14951fa9f6b7092181 2024-11-18T20:23:12,660 INFO [M:0;5a964fc427ed:44109 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b1dba4df897c4f14951fa9f6b7092181, entries=6, sequenceid=56, filesize=6.0 K 2024-11-18T20:23:12,661 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2c76ce3f001745ae96ab53cc0136f8de as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2c76ce3f001745ae96ab53cc0136f8de 2024-11-18T20:23:12,667 INFO [M:0;5a964fc427ed:44109 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2c76ce3f001745ae96ab53cc0136f8de, entries=1, sequenceid=56, filesize=5.0 K 2024-11-18T20:23:12,668 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7019f7e4d6b4a399a5d665256970a68 as hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7019f7e4d6b4a399a5d665256970a68 2024-11-18T20:23:12,674 INFO [M:0;5a964fc427ed:44109 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7019f7e4d6b4a399a5d665256970a68, entries=1, sequenceid=56, filesize=4.9 K 2024-11-18T20:23:12,675 INFO [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=56, compaction requested=false 2024-11-18T20:23:12,677 INFO [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:23:12,677 DEBUG [M:0;5a964fc427ed:44109 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961392530Disabling compacts and flushes for region at 1731961392530Disabling writes for close at 1731961392531 (+1 ms)Obtaining lock to block concurrent updates at 1731961392531Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961392531Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731961392531Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961392539 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961392539Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961392553 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961392553Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961392563 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961392578 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961392578Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961392588 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961392602 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961392602Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961392614 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961392634 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961392634Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ecb1360: reopening flushed file at 1731961392646 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@635d2aa3: reopening flushed file at 1731961392653 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ada7df4: reopening flushed file at 1731961392660 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27aba24e: reopening flushed file at 1731961392667 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=56, compaction requested=false at 1731961392675 (+8 ms)Writing region close event to WAL at 1731961392676 (+1 ms)Closed at 1731961392676 2024-11-18T20:23:12,677 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,677 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,677 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,677 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,677 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:12,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45389 is added to blk_1073741847_1031 (size=757) 2024-11-18T20:23:12,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741847_1031 (size=757) 2024-11-18T20:23:12,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:12,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:13,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:13,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:13,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-18T20:23:14,112 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-18T20:23:14,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:23:14,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:23:14,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:23:14,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:23:14,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:23:14,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:23:14,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:23:14,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:14,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:14,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:14,142 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:14,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:14,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:14,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:14,621 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:23:14,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:14,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:15,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:23:15,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:23:15,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T20:23:15,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T20:23:15,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:15,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:16,540 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 after 4002ms 2024-11-18T20:23:16,540 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/WALs/5a964fc427ed,44109,1731961362211/5a964fc427ed%2C44109%2C1731961362211.1731961362461 to hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/oldWALs/5a964fc427ed%2C44109%2C1731961362211.1731961362461 2024-11-18T20:23:16,544 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/MasterData/oldWALs/5a964fc427ed%2C44109%2C1731961362211.1731961362461 to hdfs://localhost:44059/user/jenkins/test-data/8dd5b7fb-ded3-456c-db6d-20d6841de3aa/oldWALs/5a964fc427ed%2C44109%2C1731961362211.1731961362461$masterlocalwal$ 2024-11-18T20:23:16,544 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:23:16,544 INFO [M:0;5a964fc427ed:44109 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:23:16,545 INFO [M:0;5a964fc427ed:44109 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44109 2024-11-18T20:23:16,545 INFO [M:0;5a964fc427ed:44109 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:23:16,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:23:16,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44109-0x10150cadcbb0000, quorum=127.0.0.1:62599, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:23:16,673 INFO [M:0;5a964fc427ed:44109 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:23:16,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@56b0b8ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:16,676 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16fecb8d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:23:16,676 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:23:16,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e60361d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:23:16,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e79e191{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,STOPPED} 2024-11-18T20:23:16,678 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:23:16,678 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:23:16,678 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:23:16,678 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-286708197-172.17.0.2-1731961360674 (Datanode Uuid 4b0d931c-1d02-4523-beb0-94685c199429) service to localhost/127.0.0.1:44059 2024-11-18T20:23:16,679 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data3/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:16,679 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data4/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:16,679 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:23:16,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f748d96{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:16,686 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c9115f6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:23:16,686 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:23:16,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24dbb8ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:23:16,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21d94b42{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,STOPPED} 2024-11-18T20:23:16,688 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:23:16,688 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:23:16,688 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:23:16,688 WARN [BP-286708197-172.17.0.2-1731961360674 heartbeating to localhost/127.0.0.1:44059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-286708197-172.17.0.2-1731961360674 (Datanode Uuid 9967eb4e-3039-43d6-92a8-831875a4513a) service to localhost/127.0.0.1:44059 2024-11-18T20:23:16,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data2/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:16,689 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:23:16,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/cluster_75eed8fb-572f-9b52-9aa7-b7959e3421b4/data/data1/current/BP-286708197-172.17.0.2-1731961360674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:23:16,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19160285{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:23:16,698 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40c321ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:23:16,698 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:23:16,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45628471{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:23:16,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0e18a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir/,STOPPED} 2024-11-18T20:23:16,706 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:23:16,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:23:16,738 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44059 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44059 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44059 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44059 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:44059 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44059 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44059 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44059 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=296 (was 410), ProcessCount=11 (was 11), AvailableMemoryMB=4145 (was 3684) - AvailableMemoryMB LEAK? 
- 2024-11-18T20:23:16,747 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=296, ProcessCount=11, AvailableMemoryMB=4144 2024-11-18T20:23:16,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.log.dir so I do NOT create it in target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/07d4bab2-c394-bc02-c41d-60eee399b8b2/hadoop.tmp.dir so I do NOT create it in target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9, deleteOnExit=true 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/test.cache.data in system properties and HBase conf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:23:16,748 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:23:16,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:23:16,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:23:16,766 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:23:16,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:16,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:16,984 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:23:16,990 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:23:16,992 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:23:16,992 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:23:16,992 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:23:16,994 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:23:16,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:23:16,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:23:17,106 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d483d07{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/java.io.tmpdir/jetty-localhost-40323-hadoop-hdfs-3_4_1-tests_jar-_-any-15963729496748519637/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:23:17,107 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:40323} 2024-11-18T20:23:17,107 INFO [Time-limited test {}] server.Server(415): Started @188598ms 2024-11-18T20:23:17,120 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:23:17,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:23:17,297 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:23:17,302 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:23:17,302 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:23:17,302 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:23:17,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:23:17,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:23:17,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43d16ee8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/java.io.tmpdir/jetty-localhost-39747-hadoop-hdfs-3_4_1-tests_jar-_-any-12789551771242976481/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:17,400 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:39747} 2024-11-18T20:23:17,400 INFO [Time-limited test {}] server.Server(415): Started @188892ms 2024-11-18T20:23:17,402 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:23:17,445 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:23:17,451 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:23:17,455 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:23:17,455 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:23:17,455 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:23:17,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:23:17,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:23:17,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2526c219{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/java.io.tmpdir/jetty-localhost-39459-hadoop-hdfs-3_4_1-tests_jar-_-any-7316151163138280530/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:23:17,558 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:39459} 2024-11-18T20:23:17,558 INFO [Time-limited test {}] server.Server(415): Started @189049ms 2024-11-18T20:23:17,559 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:23:17,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:17,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:17,963 WARN [Thread-1645 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data1/current/BP-1810400661-172.17.0.2-1731961396779/current, will proceed with Du for space computation calculation, 2024-11-18T20:23:17,963 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data2/current/BP-1810400661-172.17.0.2-1731961396779/current, will proceed with Du for space computation calculation, 2024-11-18T20:23:17,992 WARN [Thread-1610 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:23:17,997 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ff99b96de6d117 with lease ID 0x8ccaf8162591a9cc: Processing first storage report for DS-8f2b5849-bf10-41bc-bafe-f3a797a4d661 from datanode DatanodeRegistration(127.0.0.1:44019, datanodeUuid=6cc33458-e10e-4ffa-8be6-125b0f2e35b7, infoPort=36593, infoSecurePort=0, ipcPort=34673, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779) 2024-11-18T20:23:17,998 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ff99b96de6d117 with lease ID 0x8ccaf8162591a9cc: from storage DS-8f2b5849-bf10-41bc-bafe-f3a797a4d661 node DatanodeRegistration(127.0.0.1:44019, datanodeUuid=6cc33458-e10e-4ffa-8be6-125b0f2e35b7, infoPort=36593, infoSecurePort=0, ipcPort=34673, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:23:17,998 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ff99b96de6d117 with lease ID 0x8ccaf8162591a9cc: Processing first storage report for DS-6848afbd-78ab-41e4-83a7-0e289bc5b891 from datanode DatanodeRegistration(127.0.0.1:44019, datanodeUuid=6cc33458-e10e-4ffa-8be6-125b0f2e35b7, infoPort=36593, infoSecurePort=0, ipcPort=34673, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779) 2024-11-18T20:23:17,998 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ff99b96de6d117 with lease ID 0x8ccaf8162591a9cc: from storage DS-6848afbd-78ab-41e4-83a7-0e289bc5b891 node DatanodeRegistration(127.0.0.1:44019, datanodeUuid=6cc33458-e10e-4ffa-8be6-125b0f2e35b7, infoPort=36593, infoSecurePort=0, ipcPort=34673, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:23:18,136 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data3/current/BP-1810400661-172.17.0.2-1731961396779/current, will proceed with Du for space computation calculation, 2024-11-18T20:23:18,137 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data4/current/BP-1810400661-172.17.0.2-1731961396779/current, will proceed with Du for space computation calculation, 2024-11-18T20:23:18,162 WARN [Thread-1633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:23:18,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf83aebbc945affae with lease ID 0x8ccaf8162591a9cd: Processing first storage report for DS-8cbad1a5-4634-41d9-a2fa-8c716a3e5659 from datanode DatanodeRegistration(127.0.0.1:36153, datanodeUuid=10e2ad5a-1ffd-47cd-9db8-95b84e4e59e7, infoPort=33243, infoSecurePort=0, ipcPort=38067, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779) 2024-11-18T20:23:18,165 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf83aebbc945affae with lease ID 0x8ccaf8162591a9cd: from storage DS-8cbad1a5-4634-41d9-a2fa-8c716a3e5659 node DatanodeRegistration(127.0.0.1:36153, datanodeUuid=10e2ad5a-1ffd-47cd-9db8-95b84e4e59e7, infoPort=33243, infoSecurePort=0, ipcPort=38067, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:23:18,165 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf83aebbc945affae with lease ID 0x8ccaf8162591a9cd: Processing first storage report for DS-021246d9-480f-4071-971a-0ce86d20c9a8 from datanode DatanodeRegistration(127.0.0.1:36153, datanodeUuid=10e2ad5a-1ffd-47cd-9db8-95b84e4e59e7, infoPort=33243, infoSecurePort=0, ipcPort=38067, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779) 2024-11-18T20:23:18,165 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf83aebbc945affae with lease ID 0x8ccaf8162591a9cd: from storage DS-021246d9-480f-4071-971a-0ce86d20c9a8 node DatanodeRegistration(127.0.0.1:36153, datanodeUuid=10e2ad5a-1ffd-47cd-9db8-95b84e4e59e7, infoPort=33243, infoSecurePort=0, ipcPort=38067, storageInfo=lv=-57;cid=testClusterID;nsid=968077902;c=1731961396779), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:23:18,187 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf 2024-11-18T20:23:18,190 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/zookeeper_0, clientPort=54705, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:23:18,191 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54705 2024-11-18T20:23:18,192 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:18,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:18,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:23:18,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:23:18,208 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045 with version=8 2024-11-18T20:23:18,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase-staging 2024-11-18T20:23:18,210 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:23:18,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:23:18,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:23:18,210 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:23:18,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:23:18,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:23:18,210 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:23:18,210 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:23:18,213 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37113 2024-11-18T20:23:18,214 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37113 connecting to ZooKeeper ensemble=127.0.0.1:54705 2024-11-18T20:23:18,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:371130x0, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-18T20:23:18,264 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37113-0x10150cb69560000 connected 2024-11-18T20:23:18,336 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:18,338 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:18,341 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:23:18,341 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045, hbase.cluster.distributed=false 2024-11-18T20:23:18,343 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:23:18,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37113 2024-11-18T20:23:18,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37113 2024-11-18T20:23:18,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37113 2024-11-18T20:23:18,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37113 2024-11-18T20:23:18,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37113 2024-11-18T20:23:18,367 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:23:18,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:23:18,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:23:18,367 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:23:18,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:23:18,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:23:18,367 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:23:18,367 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:23:18,368 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45865 2024-11-18T20:23:18,370 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45865 connecting to ZooKeeper ensemble=127.0.0.1:54705 2024-11-18T20:23:18,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:18,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:18,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458650x0, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:23:18,386 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:458650x0, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:23:18,386 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45865-0x10150cb69560001 connected 2024-11-18T20:23:18,386 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:23:18,390 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:23:18,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:23:18,392 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:23:18,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45865 2024-11-18T20:23:18,398 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45865 2024-11-18T20:23:18,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45865 2024-11-18T20:23:18,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45865 2024-11-18T20:23:18,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45865 2024-11-18T20:23:18,418 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5a964fc427ed:37113 2024-11-18T20:23:18,418 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5a964fc427ed,37113,1731961398210 2024-11-18T20:23:18,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:23:18,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:23:18,427 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5a964fc427ed,37113,1731961398210 2024-11-18T20:23:18,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:23:18,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,436 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:23:18,436 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5a964fc427ed,37113,1731961398210 from backup master directory 2024-11-18T20:23:18,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:23:18,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5a964fc427ed,37113,1731961398210 2024-11-18T20:23:18,444 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
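The watcher events above (NodeCreated, NodeDeleted, NodeChildrenChanged on /hbase/master and /hbase/backup-masters) come from ZKWatcher callbacks registered via ZKUtil, including watches set on znodes that do not exist yet. A minimal sketch of that same pattern with the plain Apache ZooKeeper client is shown below; the connect string, session timeout, and znode path are illustrative only and are not taken from this run.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch: register a watch on a znode that may not exist yet, mirroring the
// "Set watcher on znode that does not yet exist" lines in the log above.
public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // NodeCreated / NodeDeleted / NodeChildrenChanged events arrive here,
        // analogous to the ZKWatcher(609) entries logged above.
        System.out.println("event=" + event.getType() + " path=" + event.getPath());
      }
    });
    // exists() with watch=true registers the default watcher even when the
    // znode is absent; the callback fires once the znode is created.
    zk.exists("/hbase/master", true);
    Thread.sleep(5000);
    zk.close();
  }
}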
2024-11-18T20:23:18,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:23:18,444 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5a964fc427ed,37113,1731961398210 2024-11-18T20:23:18,448 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/hbase.id] with ID: 5dad2aa6-e009-43dd-8d3e-b37566208ac2 2024-11-18T20:23:18,448 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/.tmp/hbase.id 2024-11-18T20:23:18,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:23:18,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:23:18,468 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/.tmp/hbase.id]:[hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/hbase.id] 2024-11-18T20:23:18,483 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:18,483 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:23:18,485 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
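The hbase.id handling above follows a write-temp-then-rename pattern: the cluster ID is first written under .tmp and then moved to its final location so readers never observe a partially written file. A rough sketch of that pattern with the Hadoop FileSystem API follows; the paths are placeholders and this is not the actual FSUtils implementation, only an illustration of the same idea (the UUID shown is the one logged for this run).

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: write the cluster ID to a temporary file, then rename it into place.
public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/tmp/hbase-id-demo/.tmp/hbase.id");
    Path target = new Path("/tmp/hbase-id-demo/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("5dad2aa6-e009-43dd-8d3e-b37566208ac2".getBytes(StandardCharsets.UTF_8));
    }
    // On HDFS, rename of a single file is atomic, so the ID file appears
    // fully written or not at all.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}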
2024-11-18T20:23:18,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:23:18,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:23:18,503 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:23:18,504 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:23:18,504 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:23:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:23:18,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:23:18,524 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store 2024-11-18T20:23:18,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:23:18,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:23:18,535 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:23:18,535 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:23:18,535 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:23:18,535 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:23:18,535 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:23:18,535 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:23:18,535 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
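The master:store descriptor above declares four column families (info, proc, rs, state) with per-family versions, block size, and data block encoding. A hedged sketch of declaring a descriptor with the same shape via the public HBase client builders is shown below, covering only the 'info' and 'proc' families for brevity; the real descriptor is assembled internally by MasterRegion, so this is an approximation rather than the code that produced the log entries.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a descriptor shaped like the logged 'info' and 'proc' families.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8 KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .build())
        .build();
  }
}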
2024-11-18T20:23:18,535 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961398535Disabling compacts and flushes for region at 1731961398535Disabling writes for close at 1731961398535Writing region close event to WAL at 1731961398535Closed at 1731961398535 2024-11-18T20:23:18,536 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/.initializing 2024-11-18T20:23:18,536 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/WALs/5a964fc427ed,37113,1731961398210 2024-11-18T20:23:18,539 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C37113%2C1731961398210, suffix=, logDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/WALs/5a964fc427ed,37113,1731961398210, archiveDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/oldWALs, maxLogs=10 2024-11-18T20:23:18,540 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37113%2C1731961398210.1731961398540 2024-11-18T20:23:18,545 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/WALs/5a964fc427ed,37113,1731961398210/5a964fc427ed%2C37113%2C1731961398210.1731961398540 2024-11-18T20:23:18,546 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36593:36593),(127.0.0.1/127.0.0.1:33243:33243)] 2024-11-18T20:23:18,553 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:23:18,553 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:23:18,553 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,553 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:23:18,558 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:18,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:23:18,561 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:23:18,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:23:18,563 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:23:18,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:23:18,565 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:23:18,566 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,567 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,567 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,568 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,568 DEBUG [master/5a964fc427ed:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,569 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:23:18,570 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:23:18,572 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:23:18,573 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845174, jitterRate=0.07469440996646881}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:23:18,573 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961398553Initializing all the Stores at 1731961398554 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961398554Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961398556 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961398556Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961398556Cleaning up temporary data from old regions at 1731961398568 (+12 ms)Region opened successfully at 1731961398573 (+5 ms) 2024-11-18T20:23:18,573 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:23:18,576 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b1f4eb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:23:18,577 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:23:18,577 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:23:18,577 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:23:18,578 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:23:18,578 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:23:18,579 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:23:18,579 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:23:18,582 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:23:18,583 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:23:18,592 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:23:18,592 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:23:18,593 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:23:18,602 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:23:18,602 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:23:18,604 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:23:18,610 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:23:18,611 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:23:18,618 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:23:18,621 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:23:18,627 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:23:18,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:23:18,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:23:18,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,636 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5a964fc427ed,37113,1731961398210, sessionid=0x10150cb69560000, setting cluster-up flag (Was=false) 2024-11-18T20:23:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,677 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:23:18,678 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,37113,1731961398210 2024-11-18T20:23:18,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:18,719 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:23:18,721 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,37113,1731961398210 2024-11-18T20:23:18,722 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:23:18,724 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:23:18,724 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:23:18,725 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:23:18,725 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5a964fc427ed,37113,1731961398210 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:23:18,726 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:23:18,727 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:23:18,727 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:23:18,727 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:23:18,727 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5a964fc427ed:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:23:18,727 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,727 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:23:18,727 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:23:18,727 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961428727 2024-11-18T20:23:18,728 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:23:18,728 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:23:18,728 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:23:18,728 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:23:18,728 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:23:18,728 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:23:18,728 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,729 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:23:18,729 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:23:18,729 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:23:18,729 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:23:18,729 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:23:18,729 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:23:18,729 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:23:18,729 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961398729,5,FailOnTimeoutGroup] 2024-11-18T20:23:18,730 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,730 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961398729,5,FailOnTimeoutGroup] 2024-11-18T20:23:18,730 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
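[Editor's note] The repeated "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" entries above, and the METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'} attribute that appears on the meta descriptor below, both come from the same switch: the store file tracker implementation can be chosen cluster-wide or per table. A minimal sketch, assuming the HBase 3.x client API; the table name "demo_table" and the "FILE" tracker value are illustrative, not taken from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Cluster-wide default: this is the value StoreFileTrackerFactory falls back to
    // when a table or column family carries no tracker setting of its own.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");

    // Per-table override: the same key stored as a table attribute, which is what
    // shows up as METADATA => {'hbase.store.file-tracker.impl' => ...} in the log.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))          // illustrative name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.store.file-tracker.impl", "FILE")    // e.g. a file-based tracker
        .build();

    System.out.println(td);
  }
}
```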
2024-11-18T20:23:18,730 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:23:18,730 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,730 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,730 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:23:18,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:23:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:23:18,740 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:23:18,740 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045 2024-11-18T20:23:18,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:23:18,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:23:18,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:23:18,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:23:18,749 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:23:18,750 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:18,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-18T20:23:18,752 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:23:18,752 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:18,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:23:18,753 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:23:18,754 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:18,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:23:18,755 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:23:18,755 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:18,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:18,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:23:18,757 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740 2024-11-18T20:23:18,757 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740 2024-11-18T20:23:18,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:23:18,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:23:18,759 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
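[Editor's note] The CompactionConfiguration entries above print the effective thresholds for each store (minCompactSize 128 MB, min/max files 3/10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 ms with 0.5 jitter), and the FlushLargeStoresPolicy line shows the fallback used when no per-family flush lower bound is set. A hedged sketch of where those numbers usually come from; the property names are the standard hbase-site.xml keys these fields are commonly read from, inferred from the values in the log rather than quoted from it:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // files [minFilesToCompact:3, maxFilesToCompact:10)
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // size [minCompactSize:128 MB, ...); ratio 1.2; off-peak ratio 5.0
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

    // throttle point 2684354560 (~2.5 GB): compactions larger than this go to the
    // large-compaction thread pool instead of the small one.
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);

    // major period 604800000 ms (7 days), major jitter 0.5
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

    // Key name taken verbatim from the FlushLargeStoresPolicy message; it is normally
    // set in the table descriptor, and when absent the policy falls back to
    // memstore flush size / number of families (the "(16.0 M)" seen in the log).
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}
```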
2024-11-18T20:23:18,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:23:18,763 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:23:18,763 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773825, jitterRate=-0.016030922532081604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:23:18,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961398747Initializing all the Stores at 1731961398748 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961398748Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961398748Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961398748Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961398748Cleaning up temporary data from old regions at 1731961398758 (+10 ms)Region opened successfully at 1731961398764 (+6 ms) 2024-11-18T20:23:18,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:23:18,764 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:23:18,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:23:18,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:23:18,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:23:18,764 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:23:18,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961398764Disabling compacts and flushes for region at 1731961398764Disabling writes for close at 1731961398764Writing region 
close event to WAL at 1731961398764Closed at 1731961398764 2024-11-18T20:23:18,766 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:23:18,766 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:23:18,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:23:18,767 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:23:18,768 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:23:18,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:18,807 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(746): ClusterId : 5dad2aa6-e009-43dd-8d3e-b37566208ac2 2024-11-18T20:23:18,807 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:23:18,817 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:23:18,818 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:23:18,831 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:23:18,831 DEBUG [RS:0;5a964fc427ed:45865 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14dc4e0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:23:18,843 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5a964fc427ed:45865 2024-11-18T20:23:18,843 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:23:18,843 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:23:18,843 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T20:23:18,844 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,37113,1731961398210 with port=45865, startcode=1731961398366 2024-11-18T20:23:18,844 DEBUG [RS:0;5a964fc427ed:45865 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:23:18,846 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43831, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:23:18,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37113 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,45865,1731961398366 2024-11-18T20:23:18,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37113 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,45865,1731961398366 2024-11-18T20:23:18,848 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045 2024-11-18T20:23:18,848 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32799 2024-11-18T20:23:18,848 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:23:18,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:18,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:23:18,859 DEBUG [RS:0;5a964fc427ed:45865 {}] zookeeper.ZKUtil(111): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,45865,1731961398366 2024-11-18T20:23:18,859 WARN [RS:0;5a964fc427ed:45865 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
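[Editor's note] The two "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" stack traces above come from RecoverLeaseFSUtils probing whether an old WAL file is already closed: per the trace it calls DistributedFileSystem.isFileClosed reflectively, and the call fails here only because the backing DFSClient from the previous mini-cluster has already been shut down. A minimal sketch of that reflective probe, assuming only the Hadoop FileSystem API visible in the trace; the class and method names below are illustrative:

```java
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbeSketch {
  /**
   * Reflectively asks the filesystem whether a file is already closed, mirroring the
   * RecoverLeaseFSUtils.isFileClosed call in the stack trace. Returns false when the
   * method is missing or the call fails (e.g. the "Filesystem closed" IOException above),
   * in which case lease recovery simply keeps retrying.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (ReflectiveOperationException | RuntimeException e) {
      // InvocationTargetException wraps the underlying IOException, which is what
      // surfaces as the WARN "Failed invocation for ..." in the WAL close path.
      return false;
    }
  }

  public static void main(String[] args) throws Exception {
    // LocalFileSystem has no isFileClosed method, so this prints false.
    FileSystem local = FileSystem.getLocal(new Configuration());
    System.out.println(isFileClosed(local, new Path("/tmp/example")));
  }
}
```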
2024-11-18T20:23:18,859 INFO [RS:0;5a964fc427ed:45865 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:23:18,859 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366 2024-11-18T20:23:18,864 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,45865,1731961398366] 2024-11-18T20:23:18,866 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:23:18,867 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:23:18,868 INFO [RS:0;5a964fc427ed:45865 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:23:18,868 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,868 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:23:18,869 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:23:18,869 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,869 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:23:18,870 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:23:18,870 DEBUG [RS:0;5a964fc427ed:45865 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:23:18,870 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,870 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,870 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,870 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,870 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,870 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,45865,1731961398366-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:23:18,890 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:23:18,890 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,45865,1731961398366-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,890 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:18,890 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.Replication(171): 5a964fc427ed,45865,1731961398366 started 2024-11-18T20:23:18,907 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
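[Editor's note] The block of "Starting executor service name=RS_*, corePoolSize=N, maxPoolSize=N" lines comes from HBase's internal event-executor wrapper; each named pool is essentially a small thread pool draining a queue of region open/close and similar events. A plain JDK sketch of the same core/max shape, purely illustrative and not HBase code:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    // corePoolSize=1, maxPoolSize=1 is the shape most RS_* pools above use:
    // one worker thread draining an unbounded queue of handler tasks.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.allowCoreThreadTimeOut(true); // idle workers may exit, as in the HBase pools

    for (int i = 0; i < 3; i++) {
      int task = i;
      openRegionPool.execute(() -> System.out.println("handling event " + task));
    }
    openRegionPool.shutdown();
    openRegionPool.awaitTermination(10, TimeUnit.SECONDS);
  }
}
```

With an unbounded queue the maximum pool size never grows past the core size, which is why the logged pools report core and max as the same number.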
2024-11-18T20:23:18,907 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,45865,1731961398366, RpcServer on 5a964fc427ed/172.17.0.2:45865, sessionid=0x10150cb69560001 2024-11-18T20:23:18,907 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:23:18,907 DEBUG [RS:0;5a964fc427ed:45865 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,45865,1731961398366 2024-11-18T20:23:18,907 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,45865,1731961398366' 2024-11-18T20:23:18,907 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:23:18,908 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:23:18,908 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:23:18,908 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:23:18,908 DEBUG [RS:0;5a964fc427ed:45865 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5a964fc427ed,45865,1731961398366 2024-11-18T20:23:18,908 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,45865,1731961398366' 2024-11-18T20:23:18,908 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:23:18,909 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:23:18,909 DEBUG [RS:0;5a964fc427ed:45865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:23:18,909 INFO [RS:0;5a964fc427ed:45865 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:23:18,909 INFO [RS:0;5a964fc427ed:45865 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:23:18,919 WARN [5a964fc427ed:37113 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
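[Editor's note] The procedure-member startup above ("Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'", "Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'") is plain ZooKeeper traffic: the region server lists children of the acquired/reached/abort znodes the master cleared earlier. A small sketch using the stock ZooKeeper client; the quorum address is the one from this log and would differ on a real cluster:

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZNodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum/port taken from the log (127.0.0.1:54705); watcher is a no-op here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54705", 30_000, event -> { });
    try {
      for (String znode : new String[] {
          "/hbase/flush-table-proc/abort",
          "/hbase/flush-table-proc/acquired",
          "/hbase/online-snapshot/acquired"}) {
        if (zk.exists(znode, false) == null) {
          System.out.println(znode + " does not exist (not necessarily an error)");
        } else {
          List<String> children = zk.getChildren(znode, false);
          System.out.println(znode + " -> " + children);
        }
      }
    } finally {
      zk.close();
    }
  }
}
```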
2024-11-18T20:23:19,011 INFO [RS:0;5a964fc427ed:45865 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C45865%2C1731961398366, suffix=, logDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366, archiveDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/oldWALs, maxLogs=32 2024-11-18T20:23:19,012 INFO [RS:0;5a964fc427ed:45865 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C45865%2C1731961398366.1731961399012 2024-11-18T20:23:19,024 INFO [RS:0;5a964fc427ed:45865 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961399012 2024-11-18T20:23:19,025 DEBUG [RS:0;5a964fc427ed:45865 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33243:33243),(127.0.0.1/127.0.0.1:36593:36593)] 2024-11-18T20:23:19,169 DEBUG [5a964fc427ed:37113 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:23:19,169 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5a964fc427ed,45865,1731961398366 2024-11-18T20:23:19,171 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,45865,1731961398366, state=OPENING 2024-11-18T20:23:19,177 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:23:19,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:19,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:23:19,187 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:23:19,187 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:23:19,187 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:23:19,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,45865,1731961398366}] 2024-11-18T20:23:19,340 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:23:19,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37039, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:23:19,346 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:23:19,346 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:23:19,348 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C45865%2C1731961398366.meta, suffix=.meta, logDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366, archiveDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/oldWALs, maxLogs=32 2024-11-18T20:23:19,349 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C45865%2C1731961398366.meta.1731961399349.meta 2024-11-18T20:23:19,357 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.meta.1731961399349.meta 2024-11-18T20:23:19,362 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36593:36593),(127.0.0.1/127.0.0.1:33243:33243)] 2024-11-18T20:23:19,367 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:23:19,367 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:23:19,367 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:23:19,367 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
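[Editor's note] The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above describe the FSHLog writers created for the region server WAL and for the meta WAL. A hedged sketch of the configuration that typically produces those numbers; the property names are assumed from standard hbase-site.xml keys, not quoted from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // FSHLogProvider (the provider instantiated in the log) is selected by name.
    conf.set("hbase.wal.provider", "filesystem");

    // blocksize=256 MB with rollsize=128 MB corresponds to a 256 MB WAL block size
    // and a 0.5 roll multiplier (rollsize = blocksize * multiplier).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

    // maxLogs=32: how many un-archived WAL files a region server may accumulate
    // before it forces flushes so old logs can move to the oldWALs archive dir.
    conf.setInt("hbase.regionserver.maxlogs", 32);

    System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
  }
}
```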
2024-11-18T20:23:19,367 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:23:19,368 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:23:19,368 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:23:19,368 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:23:19,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:23:19,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:23:19,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:19,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:19,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:23:19,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:23:19,371 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:19,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:19,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:23:19,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:23:19,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:19,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:23:19,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:23:19,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:23:19,374 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:19,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T20:23:19,375 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:23:19,375 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740 2024-11-18T20:23:19,376 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740 2024-11-18T20:23:19,378 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:23:19,378 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:23:19,378 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:23:19,380 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:23:19,380 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879809, jitterRate=0.11873601377010345}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:23:19,380 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:23:19,381 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961399368Writing region info on filesystem at 1731961399368Initializing all the Stores at 1731961399369 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961399369Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961399369Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961399369Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961399369Cleaning up temporary data from old regions at 1731961399378 (+9 ms)Running coprocessor post-open hooks at 1731961399380 (+2 ms)Region opened successfully at 1731961399381 (+1 ms) 2024-11-18T20:23:19,382 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961399340 2024-11-18T20:23:19,385 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:23:19,385 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:23:19,386 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,45865,1731961398366 2024-11-18T20:23:19,386 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,45865,1731961398366, state=OPEN 2024-11-18T20:23:19,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:23:19,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:23:19,428 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5a964fc427ed,45865,1731961398366 2024-11-18T20:23:19,428 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:23:19,428 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:23:19,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:23:19,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,45865,1731961398366 in 241 msec 2024-11-18T20:23:19,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:23:19,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 665 msec 2024-11-18T20:23:19,436 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:23:19,436 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:23:19,438 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:23:19,438 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,45865,1731961398366, seqNum=-1] 2024-11-18T20:23:19,439 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:23:19,440 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50155, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:23:19,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 722 msec 2024-11-18T20:23:19,447 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961399447, completionTime=-1 2024-11-18T20:23:19,447 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:23:19,447 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961459450 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961519450 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37113,1731961398210-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37113,1731961398210-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37113,1731961398210-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:19,450 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5a964fc427ed:37113, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:23:19,451 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:19,451 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:23:19,453 DEBUG [master/5a964fc427ed:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.012sec 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37113,1731961398210-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:23:19,456 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37113,1731961398210-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:23:19,459 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:23:19,459 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:23:19,459 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37113,1731961398210-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:23:19,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74cca69f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:23:19,507 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5a964fc427ed,37113,-1 for getting cluster id 2024-11-18T20:23:19,507 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:23:19,516 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5dad2aa6-e009-43dd-8d3e-b37566208ac2' 2024-11-18T20:23:19,517 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:23:19,517 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5dad2aa6-e009-43dd-8d3e-b37566208ac2" 2024-11-18T20:23:19,517 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2405a0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:23:19,517 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5a964fc427ed,37113,-1] 2024-11-18T20:23:19,518 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:23:19,518 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:23:19,519 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41494, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:23:19,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c49893, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:23:19,521 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:23:19,522 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,45865,1731961398366, seqNum=-1] 2024-11-18T20:23:19,522 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:23:19,524 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34320, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:23:19,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5a964fc427ed,37113,1731961398210 2024-11-18T20:23:19,526 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:23:19,530 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:23:19,531 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:23:19,532 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5a964fc427ed,37113,1731961398210 2024-11-18T20:23:19,532 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2ae49854 2024-11-18T20:23:19,533 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:23:19,534 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41496, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:23:19,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:23:19,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T20:23:19,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:23:19,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:23:19,538 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:23:19,538 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:19,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-18T20:23:19,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:23:19,539 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:23:19,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741835_1011 (size=405) 2024-11-18T20:23:19,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741835_1011 (size=405) 2024-11-18T20:23:19,554 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 63bd71b99ffa93871afd9a888d9a789e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045 2024-11-18T20:23:19,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741836_1012 (size=88) 2024-11-18T20:23:19,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741836_1012 (size=88) 2024-11-18T20:23:19,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:23:19,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 63bd71b99ffa93871afd9a888d9a789e, disabling compactions & flushes 2024-11-18T20:23:19,561 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:23:19,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:23:19,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. after waiting 0 ms 2024-11-18T20:23:19,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 
2024-11-18T20:23:19,562 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:23:19,562 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 63bd71b99ffa93871afd9a888d9a789e: Waiting for close lock at 1731961399561Disabling compacts and flushes for region at 1731961399561Disabling writes for close at 1731961399561Writing region close event to WAL at 1731961399562 (+1 ms)Closed at 1731961399562 2024-11-18T20:23:19,563 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:23:19,564 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731961399563"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961399563"}]},"ts":"1731961399563"} 2024-11-18T20:23:19,567 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-18T20:23:19,568 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:23:19,568 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961399568"}]},"ts":"1731961399568"} 2024-11-18T20:23:19,570 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-18T20:23:19,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63bd71b99ffa93871afd9a888d9a789e, ASSIGN}] 2024-11-18T20:23:19,572 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63bd71b99ffa93871afd9a888d9a789e, ASSIGN 2024-11-18T20:23:19,574 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63bd71b99ffa93871afd9a888d9a789e, ASSIGN; state=OFFLINE, location=5a964fc427ed,45865,1731961398366; forceNewPlan=false, retain=false 2024-11-18T20:23:19,724 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=63bd71b99ffa93871afd9a888d9a789e, regionState=OPENING, regionLocation=5a964fc427ed,45865,1731961398366 2024-11-18T20:23:19,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63bd71b99ffa93871afd9a888d9a789e, ASSIGN because future has completed 2024-11-18T20:23:19,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63bd71b99ffa93871afd9a888d9a789e, server=5a964fc427ed,45865,1731961398366}] 2024-11-18T20:23:19,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:19,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:19,885 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 
2024-11-18T20:23:19,885 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 63bd71b99ffa93871afd9a888d9a789e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:23:19,886 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,886 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:23:19,886 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,886 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,887 INFO [StoreOpener-63bd71b99ffa93871afd9a888d9a789e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,889 INFO [StoreOpener-63bd71b99ffa93871afd9a888d9a789e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 63bd71b99ffa93871afd9a888d9a789e columnFamilyName info 2024-11-18T20:23:19,889 DEBUG [StoreOpener-63bd71b99ffa93871afd9a888d9a789e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:23:19,890 INFO [StoreOpener-63bd71b99ffa93871afd9a888d9a789e-1 {}] regionserver.HStore(327): Store=63bd71b99ffa93871afd9a888d9a789e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:23:19,890 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,891 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,891 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,891 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,892 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,893 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,896 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:23:19,896 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 63bd71b99ffa93871afd9a888d9a789e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735680, jitterRate=-0.0645352303981781}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:23:19,896 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:23:19,897 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 63bd71b99ffa93871afd9a888d9a789e: Running coprocessor pre-open hook at 1731961399886Writing region info on filesystem at 1731961399886Initializing all the Stores at 1731961399887 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961399887Cleaning up temporary data from old regions at 1731961399892 (+5 ms)Running coprocessor post-open hooks at 1731961399896 (+4 ms)Region opened successfully at 1731961399897 (+1 ms) 2024-11-18T20:23:19,898 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e., pid=6, masterSystemTime=1731961399881 2024-11-18T20:23:19,901 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:23:19,901 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:23:19,902 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=63bd71b99ffa93871afd9a888d9a789e, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,45865,1731961398366 2024-11-18T20:23:19,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63bd71b99ffa93871afd9a888d9a789e, server=5a964fc427ed,45865,1731961398366 because future has completed 2024-11-18T20:23:19,910 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:23:19,910 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 63bd71b99ffa93871afd9a888d9a789e, server=5a964fc427ed,45865,1731961398366 in 178 msec 2024-11-18T20:23:19,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:23:19,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63bd71b99ffa93871afd9a888d9a789e, ASSIGN in 339 msec 2024-11-18T20:23:19,916 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:23:19,916 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961399916"}]},"ts":"1731961399916"} 2024-11-18T20:23:19,919 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-18T20:23:19,920 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:23:19,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 385 msec 2024-11-18T20:23:20,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:20,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:20,896 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:23:20,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:20,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:23:21,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:21,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:22,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:22,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:23,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:23,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:24,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:24,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:24,866 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:23:24,867 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-18T20:23:25,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:23:25,393 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T20:23:25,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:23:25,394 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T20:23:25,394 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T20:23:25,394 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T20:23:25,395 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:23:25,395 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T20:23:25,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:25,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:26,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:26,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:27,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:27,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:28,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:28,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:23:29,585 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T20:23:29,585 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-18T20:23:29,589 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:23:29,589 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 
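The records above mark the point where the CREATE of TestLogRolling-testCompactionRecordDoesntBlockRolling completes and its single region is found via hbase:meta. Purely as an illustrative sketch, not code from this test: the same create-then-locate steps, written against a standard HBase 2.x/3.x Java client, look roughly like the snippet below. The table name and the 'info' family are taken from this log; every other name, address, and setting is a placeholder assumption.

    // Hedged sketch only: assumes an HBase client on the classpath and cluster settings in hbase-site.xml.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAndLocateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // placeholder configuration, not this test's
        TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Create the table with the single 'info' family that the flush output below refers to.
          TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build();
          admin.createTable(td);
          // Resolve the region serving 'row0001', the same lookup the region locator logs below.
          try (RegionLocator locator = conn.getRegionLocator(tn)) {
            HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
            System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
          }
        }
      }
    }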
2024-11-18T20:23:29,593 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e., hostname=5a964fc427ed,45865,1731961398366, seqNum=2]
2024-11-18T20:23:29,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:23:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:23:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-18T20:23:29,613 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T20:23:29,614 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T20:23:29,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T20:23:29,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-18T20:23:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.
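For orientation only: a flush request like the one HMaster logs above ("Client=jenkins//172.17.0.2 flush TestLogRolling-...") is what the public Admin API issues, and the master then runs it as a FlushTableProcedure with one FlushRegionProcedure per region, exactly as pid=7 and pid=8 show here. A minimal hedged sketch, with connection setup assumed as in the previous sketch and nothing taken from the test code itself:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class FlushSketch {
      // Illustrative helper name; the Connection is assumed to be open against the test cluster.
      static void flushTable(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Admin admin = conn.getAdmin()) {
          // Requests a flush of all regions of the table; the master drives the
          // FlushTableProcedure / FlushRegionProcedure sequence seen in the records above.
          admin.flush(tn);
        }
      }
    }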
2024-11-18T20:23:29,778 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 63bd71b99ffa93871afd9a888d9a789e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T20:23:29,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/c03597a2bf284e9989769f842e623583 is 1080, key is row0001/info:/1731961409594/Put/seqid=0 2024-11-18T20:23:29,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741837_1013 (size=6033) 2024-11-18T20:23:29,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741837_1013 (size=6033) 2024-11-18T20:23:29,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:29,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T20:23:30,202 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/c03597a2bf284e9989769f842e623583
2024-11-18T20:23:30,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/c03597a2bf284e9989769f842e623583 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/c03597a2bf284e9989769f842e623583
2024-11-18T20:23:30,220 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/c03597a2bf284e9989769f842e623583, entries=1, sequenceid=5, filesize=5.9 K
2024-11-18T20:23:30,221 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63bd71b99ffa93871afd9a888d9a789e in 443ms, sequenceid=5, compaction requested=false
2024-11-18T20:23:30,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 63bd71b99ffa93871afd9a888d9a789e:
2024-11-18T20:23:30,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.
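The RecoverLeaseFSUtils warnings that keep repeating around these records all report the same root cause, "Filesystem closed" thrown from DFSClient.checkOpen: the reflective isFileClosed call goes through a DistributedFileSystem handle whose client has already been shut down, so each once-a-second retry against hdfs://localhost:33409 fails identically. A minimal standalone sketch of that failure mode, not HBase or HDFS code, with the path and address as placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ClosedFsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI uri = URI.create("hdfs://localhost:33409");        // placeholder namenode address
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.newInstance(uri, conf);
        Path wal = new Path("/some/wal/file");                 // placeholder WAL path
        dfs.close();                                           // after this the wrapped DFSClient rejects calls
        try {
          dfs.isFileClosed(wal);                               // the call RecoverLeaseFSUtils makes reflectively
        } catch (java.io.IOException e) {
          System.out.println(e.getMessage());                  // "Filesystem closed"
        }
      }
    }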
2024-11-18T20:23:30,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-18T20:23:30,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-18T20:23:30,230 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T20:23:30,230 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 611 msec 2024-11-18T20:23:30,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 626 msec 2024-11-18T20:23:30,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:30,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:31,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:31,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:32,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:32,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:33,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:33,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:34,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:34,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:35,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:35,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:36,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:36,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:37,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:37,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:38,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:38,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:39,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-18T20:23:39,695 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T20:23:39,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:23:39,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:23:39,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-18T20:23:39,701 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T20:23:39,702 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T20:23:39,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T20:23:39,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:39,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-18T20:23:39,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:23:39,860 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 63bd71b99ffa93871afd9a888d9a789e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T20:23:39,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/cea2297261dc427aa486a1f417b1e273 is 1080, key is row0002/info:/1731961419696/Put/seqid=0 2024-11-18T20:23:39,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741838_1014 (size=6033) 2024-11-18T20:23:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741838_1014 (size=6033) 2024-11-18T20:23:39,872 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/cea2297261dc427aa486a1f417b1e273 2024-11-18T20:23:39,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/cea2297261dc427aa486a1f417b1e273 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/cea2297261dc427aa486a1f417b1e273 2024-11-18T20:23:39,886 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/cea2297261dc427aa486a1f417b1e273, entries=1, sequenceid=9, filesize=5.9 K 2024-11-18T20:23:39,888 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63bd71b99ffa93871afd9a888d9a789e in 27ms, sequenceid=9, compaction requested=false 2024-11-18T20:23:39,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 63bd71b99ffa93871afd9a888d9a789e: 2024-11-18T20:23:39,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:23:39,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-18T20:23:39,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-18T20:23:39,893 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-18T20:23:39,893 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec 2024-11-18T20:23:39,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-11-18T20:23:40,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:40,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:41,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:41,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 after 68064ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:23:41,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:41,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta after 68056ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:23:42,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:42,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:43,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:43,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:44,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:44,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:45,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:45,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:46,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:46,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:47,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:47,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:48,187 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:23:48,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:48,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-18T20:23:49,786 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T20:23:49,790 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C45865%2C1731961398366.1731961429790 2024-11-18T20:23:49,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:49,871 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:49,871 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:49,871 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:49,872 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:49,872 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:23:49,872 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961399012 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961429790 2024-11-18T20:23:49,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:49,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741833_1009 (size=5546) 2024-11-18T20:23:49,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741833_1009 (size=5546) 2024-11-18T20:23:49,879 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36593:36593),(127.0.0.1/127.0.0.1:33243:33243)] 2024-11-18T20:23:49,880 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:23:49,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:23:49,883 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T20:23:49,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-18T20:23:49,884 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T20:23:49,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T20:23:50,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-18T20:23:50,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 
2024-11-18T20:23:50,043 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 63bd71b99ffa93871afd9a888d9a789e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T20:23:50,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/5cbbcd1d270241ce9deac044767c160c is 1080, key is row0003/info:/1731961429788/Put/seqid=0 2024-11-18T20:23:50,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741840_1016 (size=6033) 2024-11-18T20:23:50,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741840_1016 (size=6033) 2024-11-18T20:23:50,063 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/5cbbcd1d270241ce9deac044767c160c 2024-11-18T20:23:50,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/5cbbcd1d270241ce9deac044767c160c as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/5cbbcd1d270241ce9deac044767c160c 2024-11-18T20:23:50,093 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/5cbbcd1d270241ce9deac044767c160c, entries=1, sequenceid=13, filesize=5.9 K 2024-11-18T20:23:50,094 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63bd71b99ffa93871afd9a888d9a789e in 51ms, sequenceid=13, compaction requested=true 2024-11-18T20:23:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 63bd71b99ffa93871afd9a888d9a789e: 2024-11-18T20:23:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 
2024-11-18T20:23:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-18T20:23:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-18T20:23:50,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-18T20:23:50,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 212 msec 2024-11-18T20:23:50,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 224 msec 2024-11-18T20:23:50,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:50,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:51,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:51,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:23:52,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:23:52,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:23:53,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:23:53,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:23:54,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:23:54,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:23:55,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:23:55,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:23:56,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:23:56,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:23:57,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:23:57,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:23:58,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:23:58,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:23:59,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:23:59,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:23:59,892 INFO [master/5a964fc427ed:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-18T20:23:59,892 INFO [master/5a964fc427ed:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
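The repeated Close-WAL-Writer-0 warnings above come from lease recovery probing DistributedFileSystem.isFileClosed through reflection after the mini-cluster's DFSClient has already been closed, so the probe's IOException: Filesystem closed surfaces wrapped in an InvocationTargetException whose own message is null, which is exactly what each stack trace shows. The following is a minimal sketch of that reflective probe pattern, for illustration only: it assumes a Hadoop FileSystem on the classpath, and the class and helper names are hypothetical, not HBase's actual RecoverLeaseFSUtils code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative sketch (hypothetical class name): probe isFileClosed reflectively. */
public final class IsFileClosedProbe {

  /**
   * Returns true if the filesystem reports the file as closed, false if the method
   * is not available or the probe itself failed (for example "Filesystem closed").
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); looking it up reflectively
      // lets the same code link against FileSystem implementations that lack it.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // filesystem does not support (or exposes no accessible) probe
    } catch (InvocationTargetException e) {
      // Method.invoke wraps whatever isFileClosed threw; with an already-closed
      // DFSClient the cause is IOException("Filesystem closed"), as the WARNs above log.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        return false;
      }
      throw new RuntimeException(cause);
    }
  }

  private IsFileClosedProbe() {
  }
}

Reflection keeps the caller compatible with filesystems that do not expose the method; the cost is that every underlying failure arrives wrapped, which is why the log prints the InvocationTargetException first and the real cause only under "Caused by".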
2024-11-18T20:23:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-18T20:23:59,935 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T20:23:59,935 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T20:23:59,936 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T20:23:59,936 DEBUG [Time-limited test {}] regionserver.HStore(1541): 63bd71b99ffa93871afd9a888d9a789e/info is initiating minor compaction (all files)
2024-11-18T20:23:59,936 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-18T20:23:59,936 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-18T20:23:59,937 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 63bd71b99ffa93871afd9a888d9a789e/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.
2024-11-18T20:23:59,937 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/c03597a2bf284e9989769f842e623583, hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/cea2297261dc427aa486a1f417b1e273, hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/5cbbcd1d270241ce9deac044767c160c] into tmpdir=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp, totalSize=17.7 K
2024-11-18T20:23:59,937 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c03597a2bf284e9989769f842e623583, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731961409594
2024-11-18T20:23:59,937 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting cea2297261dc427aa486a1f417b1e273, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731961419696
2024-11-18T20:23:59,938 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5cbbcd1d270241ce9deac044767c160c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731961429788
2024-11-18T20:23:59,950 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 63bd71b99ffa93871afd9a888d9a789e#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-18T20:23:59,951 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/f2b75ff12cc0439e8977e43a03dbb844 is 1080, key is row0001/info:/1731961409594/Put/seqid=0
2024-11-18T20:23:59,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741841_1017 (size=8296)
2024-11-18T20:23:59,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741841_1017 (size=8296)
2024-11-18T20:23:59,962 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/f2b75ff12cc0439e8977e43a03dbb844 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/f2b75ff12cc0439e8977e43a03dbb844
2024-11-18T20:23:59,971 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63bd71b99ffa93871afd9a888d9a789e/info of 63bd71b99ffa93871afd9a888d9a789e into f2b75ff12cc0439e8977e43a03dbb844(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-18T20:23:59,971 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 63bd71b99ffa93871afd9a888d9a789e:
2024-11-18T20:23:59,973 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C45865%2C1731961398366.1731961439973
2024-11-18T20:23:59,979 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:23:59,979 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:23:59,979 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:23:59,979 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:23:59,979 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:23:59,979 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961429790 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961439973
2024-11-18T20:23:59,980 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36593:36593),(127.0.0.1/127.0.0.1:33243:33243)]
2024-11-18T20:23:59,980 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961429790 is not closed yet, will try archiving it next time
2024-11-18T20:23:59,980 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961399012 to hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/oldWALs/5a964fc427ed%2C45865%2C1731961398366.1731961399012
2024-11-18T20:23:59,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741839_1015 (size=2520)
2024-11-18T20:23:59,981 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:23:59,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741839_1015 (size=2520)
2024-11-18T20:23:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:23:59,983 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T20:23:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-18T20:23:59,984 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T20:23:59,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T20:24:00,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-18T20:24:00,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.
2024-11-18T20:24:00,137 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 63bd71b99ffa93871afd9a888d9a789e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T20:24:00,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/b80a28dcc6ed4fb2a89dcb1f5ee8705d is 1080, key is row0000/info:/1731961439972/Put/seqid=0
2024-11-18T20:24:00,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741843_1019 (size=6033)
2024-11-18T20:24:00,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741843_1019 (size=6033)
2024-11-18T20:24:00,549 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/b80a28dcc6ed4fb2a89dcb1f5ee8705d
2024-11-18T20:24:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/b80a28dcc6ed4fb2a89dcb1f5ee8705d as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/b80a28dcc6ed4fb2a89dcb1f5ee8705d
2024-11-18T20:24:00,565 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/b80a28dcc6ed4fb2a89dcb1f5ee8705d, entries=1, sequenceid=18, filesize=5.9 K
2024-11-18T20:24:00,567 INFO [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63bd71b99ffa93871afd9a888d9a789e in 430ms, sequenceid=18, compaction requested=false
2024-11-18T20:24:00,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 63bd71b99ffa93871afd9a888d9a789e:
2024-11-18T20:24:00,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.
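The ExploringCompactionPolicy record earlier in this section ("selected 3 files of size 18099 ... with 1 in ratio") refers to a size-ratio eligibility test over the candidate store files. Below is a simplified sketch of that kind of check; the class and method names, the 1.2 ratio, and the per-file sizes (the 18099 total split evenly across the three ~5.9 K files) are illustrative assumptions, not the actual HBase implementation or this test's configuration.

import java.util.List;

/** Illustrative sketch (hypothetical class name) of a size-ratio compaction check. */
public final class RatioCheckSketch {

  /**
   * A candidate selection is "in ratio" when no file is larger than
   * ratio * (sum of the other files' sizes). With three 6033-byte files and an
   * assumed ratio of 1.2, 6033 <= 1.2 * (6033 + 6033) holds for every file,
   * so a selection totalling 18099 bytes like the one logged above passes.
   */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dwarfs the rest; selection is not in ratio
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes are assumptions inferred from the "size=5.9 K" / totalSize=17.7 K lines above.
    System.out.println(filesInRatio(List.of(6033L, 6033L, 6033L), 1.2)); // prints true
  }

  private RatioCheckSketch() {
  }
}

Under a test of this shape a selection is rejected as soon as any single file is far larger than the others, which keeps minor compactions from repeatedly rewriting one big file alongside a few small ones.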
2024-11-18T20:24:00,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-18T20:24:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-18T20:24:00,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-18T20:24:00,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 585 msec
2024-11-18T20:24:00,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 592 msec
2024-11-18T20:24:00,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:00,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:24:01,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:24:01,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:24:02,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:24:02,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:24:03,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:24:03,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:24:04,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:24:04,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
2024-11-18T20:24:04,886 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 63bd71b99ffa93871afd9a888d9a789e, had cached 0 bytes from a total of 14329
2024-11-18T20:24:05,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
2024-11-18T20:24:05,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:06,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:06,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:07,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:07,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:08,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:08,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:09,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:09,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:10,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-18T20:24:10,065 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T20:24:10,070 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C45865%2C1731961398366.1731961450070 2024-11-18T20:24:10,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,080 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,080 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,081 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,081 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961439973 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961450070 2024-11-18T20:24:10,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33243:33243),(127.0.0.1/127.0.0.1:36593:36593)] 2024-11-18T20:24:10,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961439973 is not closed yet, will try archiving it next time 2024-11-18T20:24:10,082 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/WALs/5a964fc427ed,45865,1731961398366/5a964fc427ed%2C45865%2C1731961398366.1731961429790 to hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/oldWALs/5a964fc427ed%2C45865%2C1731961398366.1731961429790 2024-11-18T20:24:10,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:24:10,082 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:24:10,083 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:24:10,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:10,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:10,083 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T20:24:10,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741842_1018 (size=2026) 2024-11-18T20:24:10,083 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:24:10,083 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1734479598, stopped=false 2024-11-18T20:24:10,083 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5a964fc427ed,37113,1731961398210 2024-11-18T20:24:10,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741842_1018 (size=2026) 2024-11-18T20:24:10,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:10,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:10,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:10,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:10,114 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:24:10,114 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:24:10,114 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:24:10,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:10,114 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,45865,1731961398366' ***** 2024-11-18T20:24:10,114 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:24:10,115 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:10,115 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:24:10,115 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(3091): Received CLOSE for 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,45865,1731961398366 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5a964fc427ed:45865. 2024-11-18T20:24:10,115 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 63bd71b99ffa93871afd9a888d9a789e, disabling compactions & flushes 2024-11-18T20:24:10,115 DEBUG [RS:0;5a964fc427ed:45865 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:24:10,115 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:24:10,115 DEBUG [RS:0;5a964fc427ed:45865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:10,115 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:24:10,115 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. after waiting 0 ms 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:24:10,115 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:24:10,115 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:24:10,116 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:24:10,116 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 63bd71b99ffa93871afd9a888d9a789e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T20:24:10,116 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:24:10,116 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 63bd71b99ffa93871afd9a888d9a789e=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.} 2024-11-18T20:24:10,116 DEBUG [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 63bd71b99ffa93871afd9a888d9a789e 2024-11-18T20:24:10,116 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:24:10,116 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:24:10,116 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:24:10,116 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:24:10,116 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:24:10,116 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-18T20:24:10,120 DEBUG 
[RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/25d7619988ca45c1aae3203582c69e09 is 1080, key is row0001/info:/1731961450067/Put/seqid=0 2024-11-18T20:24:10,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741845_1021 (size=6033) 2024-11-18T20:24:10,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741845_1021 (size=6033) 2024-11-18T20:24:10,129 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/25d7619988ca45c1aae3203582c69e09 2024-11-18T20:24:10,132 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/info/c475406aa1c749468b1e06acb60fc634 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e./info:regioninfo/1731961399902/Put/seqid=0 2024-11-18T20:24:10,136 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/.tmp/info/25d7619988ca45c1aae3203582c69e09 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/25d7619988ca45c1aae3203582c69e09 2024-11-18T20:24:10,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741846_1022 (size=7308) 2024-11-18T20:24:10,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741846_1022 (size=7308) 2024-11-18T20:24:10,137 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/info/c475406aa1c749468b1e06acb60fc634 2024-11-18T20:24:10,141 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/25d7619988ca45c1aae3203582c69e09, entries=1, sequenceid=22, filesize=5.9 K 2024-11-18T20:24:10,142 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63bd71b99ffa93871afd9a888d9a789e in 27ms, sequenceid=22, compaction requested=true 2024-11-18T20:24:10,142 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/c03597a2bf284e9989769f842e623583, hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/cea2297261dc427aa486a1f417b1e273, hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/5cbbcd1d270241ce9deac044767c160c] to archive 2024-11-18T20:24:10,143 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T20:24:10,145 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/c03597a2bf284e9989769f842e623583 to hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/c03597a2bf284e9989769f842e623583 2024-11-18T20:24:10,146 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/cea2297261dc427aa486a1f417b1e273 to hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/cea2297261dc427aa486a1f417b1e273 2024-11-18T20:24:10,147 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/5cbbcd1d270241ce9deac044767c160c to hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/info/5cbbcd1d270241ce9deac044767c160c 2024-11-18T20:24:10,148 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5a964fc427ed:37113 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-18T20:24:10,148 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c03597a2bf284e9989769f842e623583=6033, cea2297261dc427aa486a1f417b1e273=6033, 5cbbcd1d270241ce9deac044767c160c=6033] 2024-11-18T20:24:10,152 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63bd71b99ffa93871afd9a888d9a789e/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-18T20:24:10,152 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 2024-11-18T20:24:10,152 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 63bd71b99ffa93871afd9a888d9a789e: Waiting for close lock at 1731961450115Running coprocessor pre-close hooks at 1731961450115Disabling compacts and flushes for region at 1731961450115Disabling writes for close at 1731961450115Obtaining lock to block concurrent updates at 1731961450116 (+1 ms)Preparing flush snapshotting stores in 63bd71b99ffa93871afd9a888d9a789e at 1731961450116Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731961450116Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. at 1731961450116Flushing 63bd71b99ffa93871afd9a888d9a789e/info: creating writer at 1731961450117 (+1 ms)Flushing 63bd71b99ffa93871afd9a888d9a789e/info: appending metadata at 1731961450119 (+2 ms)Flushing 63bd71b99ffa93871afd9a888d9a789e/info: closing flushed file at 1731961450119Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cbd13ad: reopening flushed file at 1731961450135 (+16 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63bd71b99ffa93871afd9a888d9a789e in 27ms, sequenceid=22, compaction requested=true at 1731961450142 (+7 ms)Writing region close event to WAL at 1731961450148 (+6 ms)Running coprocessor post-close hooks at 1731961450152 (+4 ms)Closed at 1731961450152 2024-11-18T20:24:10,152 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961399535.63bd71b99ffa93871afd9a888d9a789e. 
2024-11-18T20:24:10,157 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/ns/2ec9bd69aa5d4ccba2ac9709a4e6f1b9 is 43, key is default/ns:d/1731961399441/Put/seqid=0 2024-11-18T20:24:10,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741847_1023 (size=5153) 2024-11-18T20:24:10,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741847_1023 (size=5153) 2024-11-18T20:24:10,162 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/ns/2ec9bd69aa5d4ccba2ac9709a4e6f1b9 2024-11-18T20:24:10,180 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/table/168072c6ca7f4e919ceeda029151b33c is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731961399916/Put/seqid=0 2024-11-18T20:24:10,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741848_1024 (size=5508) 2024-11-18T20:24:10,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741848_1024 (size=5508) 2024-11-18T20:24:10,186 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/table/168072c6ca7f4e919ceeda029151b33c 2024-11-18T20:24:10,191 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/info/c475406aa1c749468b1e06acb60fc634 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/info/c475406aa1c749468b1e06acb60fc634 2024-11-18T20:24:10,197 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/info/c475406aa1c749468b1e06acb60fc634, entries=10, sequenceid=11, filesize=7.1 K 2024-11-18T20:24:10,198 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/ns/2ec9bd69aa5d4ccba2ac9709a4e6f1b9 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/ns/2ec9bd69aa5d4ccba2ac9709a4e6f1b9 2024-11-18T20:24:10,203 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/ns/2ec9bd69aa5d4ccba2ac9709a4e6f1b9, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:24:10,204 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/.tmp/table/168072c6ca7f4e919ceeda029151b33c as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/table/168072c6ca7f4e919ceeda029151b33c 2024-11-18T20:24:10,209 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/table/168072c6ca7f4e919ceeda029151b33c, entries=2, sequenceid=11, filesize=5.4 K 2024-11-18T20:24:10,211 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false 2024-11-18T20:24:10,216 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:24:10,216 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:24:10,216 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:24:10,216 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961450116Running coprocessor pre-close hooks at 1731961450116Disabling compacts and flushes for region at 1731961450116Disabling writes for close at 1731961450116Obtaining lock to block concurrent updates at 1731961450116Preparing flush snapshotting stores in 1588230740 at 1731961450116Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731961450116Flushing stores of hbase:meta,,1.1588230740 at 1731961450117 (+1 ms)Flushing 1588230740/info: creating writer at 1731961450117Flushing 1588230740/info: appending metadata at 1731961450131 (+14 ms)Flushing 1588230740/info: closing flushed file at 1731961450131Flushing 1588230740/ns: creating writer at 1731961450142 (+11 ms)Flushing 1588230740/ns: appending metadata at 1731961450156 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731961450156Flushing 1588230740/table: creating writer at 1731961450167 (+11 ms)Flushing 1588230740/table: appending metadata at 1731961450180 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731961450180Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f511ae8: reopening flushed file at 1731961450190 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@551a6564: reopening flushed file at 1731961450197 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13c37eb9: reopening flushed file at 1731961450203 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false at 1731961450211 (+8 ms)Writing region close event to WAL at 1731961450213 (+2 ms)Running coprocessor post-close hooks at 1731961450216 (+3 ms)Closed at 1731961450216 2024-11-18T20:24:10,217 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:24:10,316 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,45865,1731961398366; all regions closed. 2024-11-18T20:24:10,317 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,317 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,317 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,317 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,317 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741834_1010 (size=3306) 2024-11-18T20:24:10,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741834_1010 (size=3306) 2024-11-18T20:24:10,321 DEBUG [RS:0;5a964fc427ed:45865 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/oldWALs 2024-11-18T20:24:10,321 INFO [RS:0;5a964fc427ed:45865 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C45865%2C1731961398366.meta:.meta(num 1731961399349) 2024-11-18T20:24:10,322 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,322 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,322 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,322 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,322 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741844_1020 (size=1252) 2024-11-18T20:24:10,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741844_1020 (size=1252) 2024-11-18T20:24:10,328 DEBUG [RS:0;5a964fc427ed:45865 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/oldWALs 2024-11-18T20:24:10,328 INFO [RS:0;5a964fc427ed:45865 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C45865%2C1731961398366:(num 1731961450070) 2024-11-18T20:24:10,328 DEBUG [RS:0;5a964fc427ed:45865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:10,328 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:24:10,328 INFO [RS:0;5a964fc427ed:45865 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:24:10,328 INFO [RS:0;5a964fc427ed:45865 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T20:24:10,328 INFO [RS:0;5a964fc427ed:45865 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:24:10,329 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:24:10,329 INFO [RS:0;5a964fc427ed:45865 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45865 2024-11-18T20:24:10,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,45865,1731961398366 2024-11-18T20:24:10,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:24:10,339 INFO [RS:0;5a964fc427ed:45865 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:24:10,349 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,45865,1731961398366] 2024-11-18T20:24:10,357 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,45865,1731961398366 already deleted, retry=false 2024-11-18T20:24:10,358 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,45865,1731961398366 expired; onlineServers=0 2024-11-18T20:24:10,358 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5a964fc427ed,37113,1731961398210' ***** 2024-11-18T20:24:10,358 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:24:10,358 INFO [M:0;5a964fc427ed:37113 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:24:10,358 INFO [M:0;5a964fc427ed:37113 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:24:10,358 DEBUG [M:0;5a964fc427ed:37113 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:24:10,358 DEBUG [M:0;5a964fc427ed:37113 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:24:10,358 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:24:10,358 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961398729 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961398729,5,FailOnTimeoutGroup] 2024-11-18T20:24:10,358 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961398729 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961398729,5,FailOnTimeoutGroup] 2024-11-18T20:24:10,358 INFO [M:0;5a964fc427ed:37113 {}] hbase.ChoreService(370): Chore service for: master/5a964fc427ed:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:24:10,358 INFO [M:0;5a964fc427ed:37113 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:24:10,358 DEBUG [M:0;5a964fc427ed:37113 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:24:10,358 INFO [M:0;5a964fc427ed:37113 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:24:10,359 INFO [M:0;5a964fc427ed:37113 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:24:10,359 INFO [M:0;5a964fc427ed:37113 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:24:10,359 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:24:10,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:24:10,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:10,367 INFO [M:0;5a964fc427ed:37113 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/.lastflushedseqids 2024-11-18T20:24:10,374 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-18T20:24:10,374 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-18T20:24:10,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741849_1025 (size=130) 2024-11-18T20:24:10,378 INFO [M:0;5a964fc427ed:37113 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:24:10,379 INFO [M:0;5a964fc427ed:37113 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:24:10,379 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:24:10,379 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:24:10,379 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:10,379 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:24:10,379 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:10,379 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-18T20:24:10,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741849_1025 (size=130) 2024-11-18T20:24:10,403 DEBUG [M:0;5a964fc427ed:37113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/881e345cf3fd40c9900ede47736578d2 is 82, key is hbase:meta,,1/info:regioninfo/1731961399385/Put/seqid=0 2024-11-18T20:24:10,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741850_1026 (size=5672) 2024-11-18T20:24:10,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741850_1026 (size=5672) 2024-11-18T20:24:10,425 INFO [M:0;5a964fc427ed:37113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/881e345cf3fd40c9900ede47736578d2 2024-11-18T20:24:10,449 DEBUG [M:0;5a964fc427ed:37113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a6a6107cd7204153b898f5f2828987f2 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961399921/Put/seqid=0 2024-11-18T20:24:10,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:10,449 INFO [RS:0;5a964fc427ed:45865 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:24:10,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45865-0x10150cb69560001, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:10,449 INFO [RS:0;5a964fc427ed:45865 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,45865,1731961398366; zookeeper connection closed. 
2024-11-18T20:24:10,450 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b7d208d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b7d208d 2024-11-18T20:24:10,450 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:24:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741851_1027 (size=7823) 2024-11-18T20:24:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741851_1027 (size=7823) 2024-11-18T20:24:10,455 INFO [M:0;5a964fc427ed:37113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a6a6107cd7204153b898f5f2828987f2 2024-11-18T20:24:10,460 INFO [M:0;5a964fc427ed:37113 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a6a6107cd7204153b898f5f2828987f2 2024-11-18T20:24:10,476 DEBUG [M:0;5a964fc427ed:37113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6528fbfc538442189ece3323a297d8db is 69, key is 5a964fc427ed,45865,1731961398366/rs:state/1731961398847/Put/seqid=0 2024-11-18T20:24:10,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741852_1028 (size=5156) 2024-11-18T20:24:10,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741852_1028 (size=5156) 2024-11-18T20:24:10,489 INFO [M:0;5a964fc427ed:37113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6528fbfc538442189ece3323a297d8db 2024-11-18T20:24:10,513 DEBUG [M:0;5a964fc427ed:37113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd6d3f887dff494aa764f007b9df4767 is 52, key is load_balancer_on/state:d/1731961399529/Put/seqid=0 2024-11-18T20:24:10,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741853_1029 (size=5056) 2024-11-18T20:24:10,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741853_1029 (size=5056) 2024-11-18T20:24:10,519 INFO [M:0;5a964fc427ed:37113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd6d3f887dff494aa764f007b9df4767 2024-11-18T20:24:10,525 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/881e345cf3fd40c9900ede47736578d2 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/881e345cf3fd40c9900ede47736578d2 2024-11-18T20:24:10,531 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/881e345cf3fd40c9900ede47736578d2, entries=8, sequenceid=121, filesize=5.5 K 2024-11-18T20:24:10,532 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a6a6107cd7204153b898f5f2828987f2 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a6a6107cd7204153b898f5f2828987f2 2024-11-18T20:24:10,539 INFO [M:0;5a964fc427ed:37113 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a6a6107cd7204153b898f5f2828987f2 2024-11-18T20:24:10,539 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a6a6107cd7204153b898f5f2828987f2, entries=14, sequenceid=121, filesize=7.6 K 2024-11-18T20:24:10,540 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6528fbfc538442189ece3323a297d8db as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6528fbfc538442189ece3323a297d8db 2024-11-18T20:24:10,545 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6528fbfc538442189ece3323a297d8db, entries=1, sequenceid=121, filesize=5.0 K 2024-11-18T20:24:10,546 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd6d3f887dff494aa764f007b9df4767 as hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bd6d3f887dff494aa764f007b9df4767 2024-11-18T20:24:10,550 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32799/user/jenkins/test-data/fab3b7e7-622e-c08b-0af8-7081bc11e045/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bd6d3f887dff494aa764f007b9df4767, entries=1, sequenceid=121, filesize=4.9 K 2024-11-18T20:24:10,552 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, 
sequenceid=121, compaction requested=false 2024-11-18T20:24:10,553 INFO [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:10,553 DEBUG [M:0;5a964fc427ed:37113 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961450379Disabling compacts and flushes for region at 1731961450379Disabling writes for close at 1731961450379Obtaining lock to block concurrent updates at 1731961450379Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961450379Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44641, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1731961450379Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961450380 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961450380Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961450403 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961450403Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961450430 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961450449 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961450449Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961450460 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961450476 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961450476Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961450495 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961450513 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961450513Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@333e6b48: reopening flushed file at 1731961450524 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a5c8356: reopening flushed file at 1731961450531 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@434711d7: reopening flushed file at 1731961450539 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35903d32: reopening flushed file at 1731961450545 (+6 ms)Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, sequenceid=121, compaction requested=false at 1731961450552 (+7 ms)Writing region close event to WAL at 1731961450553 (+1 ms)Closed at 1731961450553 2024-11-18T20:24:10,554 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,554 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,554 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,554 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,554 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:10,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36153 is added to blk_1073741830_1006 (size=53038) 2024-11-18T20:24:10,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44019 is added to blk_1073741830_1006 (size=53038) 2024-11-18T20:24:10,557 INFO 
[M:0;5a964fc427ed:37113 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:24:10,557 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:24:10,557 INFO [M:0;5a964fc427ed:37113 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37113 2024-11-18T20:24:10,557 INFO [M:0;5a964fc427ed:37113 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:24:10,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:10,666 INFO [M:0;5a964fc427ed:37113 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:24:10,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37113-0x10150cb69560000, quorum=127.0.0.1:54705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:10,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2526c219{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:10,670 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:10,670 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:10,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:10,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:10,673 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:24:10,673 WARN [BP-1810400661-172.17.0.2-1731961396779 heartbeating to localhost/127.0.0.1:32799 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:24:10,673 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:24:10,673 WARN [BP-1810400661-172.17.0.2-1731961396779 heartbeating to localhost/127.0.0.1:32799 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1810400661-172.17.0.2-1731961396779 (Datanode Uuid 10e2ad5a-1ffd-47cd-9db8-95b84e4e59e7) service to localhost/127.0.0.1:32799 2024-11-18T20:24:10,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data3/current/BP-1810400661-172.17.0.2-1731961396779 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:10,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data4/current/BP-1810400661-172.17.0.2-1731961396779 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:10,675 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:24:10,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43d16ee8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:10,677 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:10,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:10,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:10,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:10,679 WARN [BP-1810400661-172.17.0.2-1731961396779 heartbeating to localhost/127.0.0.1:32799 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:24:10,679 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:24:10,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:24:10,679 WARN [BP-1810400661-172.17.0.2-1731961396779 heartbeating to localhost/127.0.0.1:32799 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1810400661-172.17.0.2-1731961396779 (Datanode Uuid 6cc33458-e10e-4ffa-8be6-125b0f2e35b7) service to localhost/127.0.0.1:32799 2024-11-18T20:24:10,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data1/current/BP-1810400661-172.17.0.2-1731961396779 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:10,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/cluster_3d4bfd17-cc4d-3477-ff33-634d9d85c3c9/data/data2/current/BP-1810400661-172.17.0.2-1731961396779 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:10,681 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:24:10,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d483d07{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:24:10,687 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:10,687 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:10,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:10,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:10,696 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:24:10,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:24:10,729 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 181) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32799 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32799 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32799 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5a964fc427ed:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32799 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32799 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32799 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32799 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32799 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:32799 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=486 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=315 (was 296) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3945 (was 4144) 2024-11-18T20:24:10,737 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=315, ProcessCount=11, AvailableMemoryMB=3945 2024-11-18T20:24:10,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:24:10,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.log.dir so I do NOT create it in target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a 2024-11-18T20:24:10,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8e35ef1-5441-86fa-d486-5d348a1a6ccf/hadoop.tmp.dir so I do NOT create it in target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a 2024-11-18T20:24:10,737 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895, deleteOnExit=true 2024-11-18T20:24:10,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:24:10,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/test.cache.data in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:24:10,738 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:24:10,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:24:10,739 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:24:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:24:10,753 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:24:10,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:10,872 INFO [regionserver/5a964fc427ed:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:24:10,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:10,948 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:10,952 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:24:10,953 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:24:10,953 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:24:10,953 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:24:10,954 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:10,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bc081d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:24:10,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e36d39c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:24:11,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f63b03b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/java.io.tmpdir/jetty-localhost-37209-hadoop-hdfs-3_4_1-tests_jar-_-any-7644625483562214031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:24:11,079 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@622d58de{HTTP/1.1, (http/1.1)}{localhost:37209} 2024-11-18T20:24:11,079 INFO [Time-limited test {}] server.Server(415): Started @242570ms 2024-11-18T20:24:11,090 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:24:11,390 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:11,393 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:24:11,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:24:11,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:24:11,394 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:24:11,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a5806e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:24:11,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb23947{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:24:11,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f5f0b5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/java.io.tmpdir/jetty-localhost-41859-hadoop-hdfs-3_4_1-tests_jar-_-any-14459900430852192704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:11,490 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31e425dc{HTTP/1.1, (http/1.1)}{localhost:41859} 2024-11-18T20:24:11,490 INFO [Time-limited test {}] server.Server(415): Started @242981ms 2024-11-18T20:24:11,491 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:24:11,517 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:11,520 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:24:11,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:24:11,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:24:11,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:24:11,521 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4ed6ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:24:11,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ab86f9f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:24:11,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@444decb3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/java.io.tmpdir/jetty-localhost-35529-hadoop-hdfs-3_4_1-tests_jar-_-any-11622847722401725633/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:11,616 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@216b0c63{HTTP/1.1, (http/1.1)}{localhost:35529} 2024-11-18T20:24:11,616 INFO [Time-limited test {}] server.Server(415): Started @243107ms 2024-11-18T20:24:11,617 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:24:11,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:11,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:12,072 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data2/current/BP-1888943501-172.17.0.2-1731961450756/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:12,072 WARN [Thread-1963 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data1/current/BP-1888943501-172.17.0.2-1731961450756/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:12,090 WARN [Thread-1927 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:24:12,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73485d349c484c36 with lease ID 0xbeaf4273a9632eef: Processing first storage report for DS-a37a49d6-1fa9-44a5-bb9e-58a6549fd284 from datanode DatanodeRegistration(127.0.0.1:37293, datanodeUuid=43c461ee-1478-4e58-8fab-970efe1b68b4, infoPort=43461, infoSecurePort=0, ipcPort=40067, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756) 2024-11-18T20:24:12,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73485d349c484c36 with lease ID 0xbeaf4273a9632eef: from storage DS-a37a49d6-1fa9-44a5-bb9e-58a6549fd284 node DatanodeRegistration(127.0.0.1:37293, datanodeUuid=43c461ee-1478-4e58-8fab-970efe1b68b4, infoPort=43461, infoSecurePort=0, ipcPort=40067, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:12,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73485d349c484c36 with lease ID 0xbeaf4273a9632eef: Processing first storage report for DS-521a73de-9bd4-4650-a1df-79c85ae18c08 from datanode DatanodeRegistration(127.0.0.1:37293, datanodeUuid=43c461ee-1478-4e58-8fab-970efe1b68b4, infoPort=43461, infoSecurePort=0, ipcPort=40067, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756) 2024-11-18T20:24:12,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73485d349c484c36 with lease ID 0xbeaf4273a9632eef: from storage DS-521a73de-9bd4-4650-a1df-79c85ae18c08 node DatanodeRegistration(127.0.0.1:37293, datanodeUuid=43c461ee-1478-4e58-8fab-970efe1b68b4, infoPort=43461, infoSecurePort=0, ipcPort=40067, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:12,207 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data3/current/BP-1888943501-172.17.0.2-1731961450756/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:12,207 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data4/current/BP-1888943501-172.17.0.2-1731961450756/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:12,229 WARN [Thread-1950 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:24:12,231 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38d4cf68aea7df03 with lease ID 0xbeaf4273a9632ef0: Processing first storage report for DS-982d1e66-0aa6-4f02-b1a4-32f0e7e53016 from datanode DatanodeRegistration(127.0.0.1:38111, datanodeUuid=de8f8b72-4878-4eee-8a07-b8fdcc8f78ca, infoPort=38555, infoSecurePort=0, ipcPort=46385, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756) 2024-11-18T20:24:12,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38d4cf68aea7df03 with lease ID 0xbeaf4273a9632ef0: from storage DS-982d1e66-0aa6-4f02-b1a4-32f0e7e53016 node DatanodeRegistration(127.0.0.1:38111, datanodeUuid=de8f8b72-4878-4eee-8a07-b8fdcc8f78ca, infoPort=38555, infoSecurePort=0, ipcPort=46385, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:12,231 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38d4cf68aea7df03 with lease ID 0xbeaf4273a9632ef0: Processing first storage report for DS-243cfe7d-cc3f-4a41-a433-8c8e187c911d from datanode DatanodeRegistration(127.0.0.1:38111, datanodeUuid=de8f8b72-4878-4eee-8a07-b8fdcc8f78ca, infoPort=38555, infoSecurePort=0, ipcPort=46385, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756) 2024-11-18T20:24:12,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38d4cf68aea7df03 with lease ID 0xbeaf4273a9632ef0: from storage DS-243cfe7d-cc3f-4a41-a433-8c8e187c911d node DatanodeRegistration(127.0.0.1:38111, datanodeUuid=de8f8b72-4878-4eee-8a07-b8fdcc8f78ca, infoPort=38555, infoSecurePort=0, ipcPort=46385, storageInfo=lv=-57;cid=testClusterID;nsid=142075702;c=1731961450756), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:12,241 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a 2024-11-18T20:24:12,245 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/zookeeper_0, clientPort=60438, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:24:12,246 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60438 2024-11-18T20:24:12,246 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:12,248 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:12,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:24:12,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:24:12,665 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af with version=8 2024-11-18T20:24:12,665 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase-staging 2024-11-18T20:24:12,670 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:24:12,671 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:12,671 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:12,671 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:24:12,671 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:12,671 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:24:12,671 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:24:12,671 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:24:12,672 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44881 2024-11-18T20:24:12,673 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44881 connecting to ZooKeeper ensemble=127.0.0.1:60438 2024-11-18T20:24:12,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:448810x0, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-18T20:24:12,749 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44881-0x10150cc3c7c0000 connected 2024-11-18T20:24:12,808 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:12,810 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:12,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:12,813 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af, hbase.cluster.distributed=false 2024-11-18T20:24:12,815 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:24:12,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44881 2024-11-18T20:24:12,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44881 2024-11-18T20:24:12,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44881 2024-11-18T20:24:12,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44881 2024-11-18T20:24:12,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44881 2024-11-18T20:24:12,840 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:24:12,840 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:12,840 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:12,840 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:24:12,840 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:12,840 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:24:12,840 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:24:12,841 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:24:12,841 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37687 2024-11-18T20:24:12,843 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37687 connecting to ZooKeeper ensemble=127.0.0.1:60438 2024-11-18T20:24:12,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:12,843 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:12,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:12,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376870x0, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:24:12,858 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376870x0, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:12,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37687-0x10150cc3c7c0001 connected 2024-11-18T20:24:12,858 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:24:12,859 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:24:12,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:24:12,860 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:24:12,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37687 2024-11-18T20:24:12,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37687 2024-11-18T20:24:12,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37687 2024-11-18T20:24:12,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37687 2024-11-18T20:24:12,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37687 2024-11-18T20:24:12,876 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5a964fc427ed:44881 2024-11-18T20:24:12,876 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5a964fc427ed,44881,1731961452670 2024-11-18T20:24:12,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:24:12,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:24:12,883 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): 
master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5a964fc427ed,44881,1731961452670 2024-11-18T20:24:12,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:12,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:12,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:24:12,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:12,891 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:24:12,892 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5a964fc427ed,44881,1731961452670 from backup master directory 2024-11-18T20:24:12,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5a964fc427ed,44881,1731961452670 2024-11-18T20:24:12,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:24:12,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:24:12,899 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
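The Close-WAL-Writer warnings repeated above all have the same shape: RecoverLeaseFSUtils invokes isFileClosed reflectively, so the log reports "java.lang.reflect.InvocationTargetException: null" and the real error ("Filesystem closed") only appears in the Caused by chain. Below is a minimal, hypothetical plain-Java sketch of that reflective-invoke-and-unwrap pattern; it is not the HBase implementation, and the class and parameter names are illustrative assumptions.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Sketch only: call an isFileClosed(...) method via reflection and unwrap the
// InvocationTargetException so the underlying IOException (e.g. "Filesystem
// closed") surfaces instead of the opaque wrapper seen in the log.
final class ReflectiveIsFileClosedSketch {

  // 'fs' and 'path' stand in for the filesystem and WAL path objects from the log.
  static boolean isFileClosed(Object fs, Object path) throws IOException {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", path.getClass());
      return (Boolean) m.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The wrapper's own message is null; the useful failure is the cause.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException("isFileClosed failed reflectively", cause);
    } catch (ReflectiveOperationException e) {
      throw new IOException("isFileClosed not available on " + fs.getClass(), e);
    }
  }

  private ReflectiveIsFileClosedSketch() {}
}
```

Because the wrapper carries no message of its own, the log line reads "InvocationTargetException: null"; the diagnostic value is always in getCause(), here the DFSClient's "Filesystem closed" IOException.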
2024-11-18T20:24:12,899 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5a964fc427ed,44881,1731961452670 2024-11-18T20:24:12,904 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/hbase.id] with ID: 270199b4-bc34-420f-9e09-bc07c2311e27 2024-11-18T20:24:12,904 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/.tmp/hbase.id 2024-11-18T20:24:12,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:24:12,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:24:12,909 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/.tmp/hbase.id]:[hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/hbase.id] 2024-11-18T20:24:12,919 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:12,919 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:24:12,920 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
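The two FSUtils messages just above describe writing the cluster ID to a temporary .tmp/hbase.id file and then moving it to its final hbase.id location. The following is a small sketch of that write-then-rename pattern against the Hadoop FileSystem API, under the assumption of a generic rootDir argument; it is illustrative only, not the actual FSUtils code.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the ".tmp then rename into place" pattern the cluster-ID log
// messages describe, mirroring the hbase.id / .tmp/hbase.id layout in the log.
final class ClusterIdFileSketch {

  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path idFile = new Path(rootDir, "hbase.id");
    Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");

    // 1. Write the ID to a temporary location first, so readers never observe
    //    a partially written hbase.id.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move it to its target location; on HDFS this is a namenode metadata
    //    operation, so the target either does not exist yet or is complete.
    if (!fs.rename(tmpFile, idFile)) {
      throw new IOException("Failed to rename " + tmpFile + " to " + idFile);
    }
  }

  private ClusterIdFileSketch() {}
}
```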
2024-11-18T20:24:12,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:12,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:12,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:24:12,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:24:12,937 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:24:12,937 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:24:12,937 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:24:12,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:24:12,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:24:12,945 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store 2024-11-18T20:24:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:24:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:24:12,952 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:12,952 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:24:12,952 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:12,952 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:12,952 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:24:12,952 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:12,952 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
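The 'master:store' descriptor printed above lists four column families (info, proc, rs, state) with per-family versions, bloom filter, block encoding and block size settings. As a rough illustration of how a descriptor of that shape could be assembled with the public client API, here is a hedged sketch covering only the 'info' and 'proc' families; the table name, the omitted families, and the builder calls are assumptions for illustration, not the internal MasterRegion code.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a descriptor shaped like the logged 'master:store' table:
// an in-memory 'info' family with 3 versions, ROWCOL bloom filter,
// ROW_INDEX_V1 encoding and 8 KB blocks, plus a plain 'proc' family.
final class MasterStoreDescriptorSketch {

  static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)            // 8192 B, as in the log
        .build();

    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }

  private MasterStoreDescriptorSketch() {}
}
```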
2024-11-18T20:24:12,952 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961452952Disabling compacts and flushes for region at 1731961452952Disabling writes for close at 1731961452952Writing region close event to WAL at 1731961452952Closed at 1731961452952 2024-11-18T20:24:12,953 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/.initializing 2024-11-18T20:24:12,953 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/WALs/5a964fc427ed,44881,1731961452670 2024-11-18T20:24:12,955 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C44881%2C1731961452670, suffix=, logDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/WALs/5a964fc427ed,44881,1731961452670, archiveDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/oldWALs, maxLogs=10 2024-11-18T20:24:12,956 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44881%2C1731961452670.1731961452956 2024-11-18T20:24:12,960 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/WALs/5a964fc427ed,44881,1731961452670/5a964fc427ed%2C44881%2C1731961452670.1731961452956 2024-11-18T20:24:12,961 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38555:38555),(127.0.0.1/127.0.0.1:43461:43461)] 2024-11-18T20:24:12,961 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:24:12,961 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:12,961 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,961 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:24:12,964 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:12,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:12,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:24:12,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:12,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:12,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:24:12,966 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:12,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:12,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:24:12,968 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:12,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:12,968 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,969 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,969 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,970 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,970 DEBUG [master/5a964fc427ed:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,971 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:24:12,971 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:12,973 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:24:12,974 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712450, jitterRate=-0.09407401084899902}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:24:12,974 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961452961Initializing all the Stores at 1731961452962 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961452962Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961452962Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961452962Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961452962Cleaning up temporary data from old regions at 1731961452970 (+8 ms)Region opened successfully at 1731961452974 (+4 ms) 2024-11-18T20:24:12,974 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:24:12,977 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43bf0184, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:24:12,978 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:24:12,978 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:24:12,978 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:24:12,979 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:24:12,979 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:24:12,979 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:24:12,979 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:24:12,982 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:24:12,982 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:24:12,990 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:24:12,991 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:24:12,991 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:24:12,999 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:24:12,999 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:24:13,000 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:24:13,007 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:24:13,009 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:24:13,015 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:24:13,017 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:24:13,024 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:24:13,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:13,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:13,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,033 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5a964fc427ed,44881,1731961452670, sessionid=0x10150cc3c7c0000, setting cluster-up flag (Was=false) 2024-11-18T20:24:13,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,074 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:24:13,078 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,44881,1731961452670 2024-11-18T20:24:13,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,124 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:24:13,125 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,44881,1731961452670 2024-11-18T20:24:13,126 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:24:13,128 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:13,128 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:24:13,128 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:24:13,128 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5a964fc427ed,44881,1731961452670 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5a964fc427ed:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:24:13,130 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:24:13,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961483131 2024-11-18T20:24:13,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:24:13,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:24:13,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:24:13,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:24:13,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:24:13,131 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:24:13,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,132 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:13,132 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:24:13,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:24:13,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:24:13,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:24:13,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:24:13,132 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:24:13,133 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,133 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:24:13,134 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961453132,5,FailOnTimeoutGroup] 2024-11-18T20:24:13,135 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961453134,5,FailOnTimeoutGroup] 2024-11-18T20:24:13,135 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,135 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:24:13,135 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,135 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
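Aside: the HMaster(1741) line above names the toggle for the high storeFileRefCount recovery chore explicitly. Purely as an illustration of setting that key programmatically (the key name is taken from the log line; the value 3 is an arbitrary example, and the usual place for it would be hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RefCountRecoveryConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 0 (the value in this run) leaves the chore disabled; any value > 0 enables it,
        // per the "Provide threshold value > 0" message above.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }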
2024-11-18T20:24:13,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:24:13,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:24:13,140 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:24:13,140 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af 2024-11-18T20:24:13,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:24:13,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:24:13,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:13,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:24:13,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:24:13,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:13,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:24:13,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:24:13,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:13,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:24:13,154 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:24:13,154 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:13,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:24:13,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:24:13,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,155 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:13,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:24:13,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740 2024-11-18T20:24:13,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740 2024-11-18T20:24:13,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:24:13,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:24:13,158 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
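Aside: both FlushLargeStoresPolicy lines in this run (32.0 M for master:store earlier, 16.0 M here for hbase:meta) follow the fallback rule the message itself states: per-family lower bound = region memstore flush size / number of families. A tiny arithmetic sketch, assuming the flush sizes implied by the logged values (128 MB for master:store, as injected above, and 64 MB for hbase:meta, inferred from 16 MB x 4 families; the 64 MB figure is not printed in this excerpt):

    public class FlushLowerBoundSketch {
      // Fallback from the log message: memstore flush size divided by the number of families.
      static long perFamilyLowerBoundMb(long flushSizeMb, int families) {
        return flushSizeMb / families;
      }

      public static void main(String[] args) {
        System.out.println(perFamilyLowerBoundMb(128, 4)); // master:store -> 32, matches "32.0 M"
        System.out.println(perFamilyLowerBoundMb(64, 4));  // hbase:meta   -> 16, matches "16.0 M"
      }
    }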
2024-11-18T20:24:13,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:24:13,160 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:24:13,161 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740793, jitterRate=-0.05803409218788147}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:24:13,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961453148Initializing all the Stores at 1731961453149 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961453149Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961453149Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961453149Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961453149Cleaning up temporary data from old regions at 1731961453157 (+8 ms)Region opened successfully at 1731961453161 (+4 ms) 2024-11-18T20:24:13,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:24:13,161 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:24:13,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:24:13,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:24:13,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:24:13,162 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:24:13,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961453161Disabling compacts and flushes for region at 1731961453161Disabling writes for close at 1731961453161Writing region close 
event to WAL at 1731961453162 (+1 ms)Closed at 1731961453162 2024-11-18T20:24:13,163 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:13,163 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:24:13,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:24:13,164 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(746): ClusterId : 270199b4-bc34-420f-9e09-bc07c2311e27 2024-11-18T20:24:13,164 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:24:13,164 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:24:13,165 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:24:13,173 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:24:13,173 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:24:13,183 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:24:13,183 DEBUG [RS:0;5a964fc427ed:37687 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c61b316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:24:13,194 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5a964fc427ed:37687 2024-11-18T20:24:13,194 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:24:13,194 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:24:13,194 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(832): About to register with Master. 
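Aside: everything from the becomeActiveMaster thread down to this RS:0 registration is ordinary startup chatter for a single-master, single-region-server mini cluster (master on 44881, region server on 37687). Assuming the long-standing HBaseTestingUtility helper from the HBase test code (the exact helper class used by this particular test is not shown in the log, so treat it as an assumption), a test that produces logs of this shape is roughly:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Starts mini ZooKeeper, mini DFS and an HBase cluster with one master and
        // one region server, the same topology this log shows.
        util.startMiniCluster();
        try {
          // test body would go here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }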
2024-11-18T20:24:13,195 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,44881,1731961452670 with port=37687, startcode=1731961452840 2024-11-18T20:24:13,195 DEBUG [RS:0;5a964fc427ed:37687 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:24:13,197 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49147, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:24:13,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44881 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44881 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,200 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af 2024-11-18T20:24:13,200 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35761 2024-11-18T20:24:13,200 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:24:13,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:24:13,208 DEBUG [RS:0;5a964fc427ed:37687 {}] zookeeper.ZKUtil(111): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,208 WARN [RS:0;5a964fc427ed:37687 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:24:13,208 INFO [RS:0;5a964fc427ed:37687 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:24:13,208 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,208 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,37687,1731961452840] 2024-11-18T20:24:13,211 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:24:13,213 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:24:13,218 INFO [RS:0;5a964fc427ed:37687 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:24:13,218 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
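Aside: the MemStoreFlusher(131) numbers above are consistent with the usual sizing rule: global limit = max heap x hbase.regionserver.global.memstore.size, and the low-water mark = limit x hbase.regionserver.global.memstore.size.lower.limit. Those key names and their defaults (0.4 and 0.95) are general HBase settings, not printed in this excerpt, and the ~2.2 GB heap is inferred from the logged 880 M, so treat all three as assumptions in this back-of-the-envelope check:

    public class GlobalMemStoreLimitSketch {
      public static void main(String[] args) {
        double maxHeapMb = 2200;                    // inferred: 880 M / 0.4
        double globalLimitMb = maxHeapMb * 0.4;     // -> 880, matches "globalMemStoreLimit=880 M"
        double lowerMarkMb = globalLimitMb * 0.95;  // -> 836, matches "globalMemStoreLimitLowMark=836 M"
        System.out.printf("limit=%.0f M, lowMark=%.0f M%n", globalLimitMb, lowerMarkMb);
      }
    }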
2024-11-18T20:24:13,219 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:24:13,219 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:24:13,220 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:24:13,220 DEBUG [RS:0;5a964fc427ed:37687 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:24:13,221 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
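Aside: the repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines are printed by ChoreService(168) when a chore is scheduled. A stripped-down sketch of that pattern, assuming the long-standing ChoreService(String), scheduleChore(ScheduledChore) and ScheduledChore(name, stopper, period) signatures; these are internal HBase classes, shown only to explain where the log lines come from:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreServiceSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        // period=1000 mirrors the CompactionChecker/MemstoreFlusherChore entries above,
        // which the log reports with unit=MILLISECONDS.
        ScheduledChore ticker = new ScheduledChore("DemoChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        service.scheduleChore(ticker);  // this call is what emits the "... is enabled." line
        Thread.sleep(3000);
        service.shutdown();
      }
    }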
2024-11-18T20:24:13,221 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,221 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,221 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,221 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,221 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37687,1731961452840-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:24:13,236 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:24:13,236 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,37687,1731961452840-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,236 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,236 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.Replication(171): 5a964fc427ed,37687,1731961452840 started 2024-11-18T20:24:13,250 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,250 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,37687,1731961452840, RpcServer on 5a964fc427ed/172.17.0.2:37687, sessionid=0x10150cc3c7c0001 2024-11-18T20:24:13,250 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:24:13,250 DEBUG [RS:0;5a964fc427ed:37687 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,250 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,37687,1731961452840' 2024-11-18T20:24:13,250 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:24:13,251 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:24:13,251 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:24:13,251 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:24:13,251 DEBUG [RS:0;5a964fc427ed:37687 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,252 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,37687,1731961452840' 2024-11-18T20:24:13,252 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:24:13,252 DEBUG 
[RS:0;5a964fc427ed:37687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:24:13,252 DEBUG [RS:0;5a964fc427ed:37687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:24:13,252 INFO [RS:0;5a964fc427ed:37687 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:24:13,252 INFO [RS:0;5a964fc427ed:37687 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:24:13,316 WARN [5a964fc427ed:44881 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:24:13,355 INFO [RS:0;5a964fc427ed:37687 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C37687%2C1731961452840, suffix=, logDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840, archiveDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/oldWALs, maxLogs=32 2024-11-18T20:24:13,355 INFO [RS:0;5a964fc427ed:37687 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37687%2C1731961452840.1731961453355 2024-11-18T20:24:13,362 INFO [RS:0;5a964fc427ed:37687 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961453355 2024-11-18T20:24:13,364 DEBUG [RS:0;5a964fc427ed:37687 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38555:38555),(127.0.0.1/127.0.0.1:43461:43461)] 2024-11-18T20:24:13,566 DEBUG [5a964fc427ed:44881 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:24:13,567 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,569 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,37687,1731961452840, state=OPENING 2024-11-18T20:24:13,605 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:24:13,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:13,618 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:24:13,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:13,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:13,618 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,37687,1731961452840}] 2024-11-18T20:24:13,774 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:24:13,778 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36613, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:24:13,784 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:24:13,785 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:24:13,787 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C37687%2C1731961452840.meta, suffix=.meta, logDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840, archiveDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/oldWALs, maxLogs=32 2024-11-18T20:24:13,787 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37687%2C1731961452840.meta.1731961453787.meta 2024-11-18T20:24:13,792 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.meta.1731961453787.meta 2024-11-18T20:24:13,793 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43461:43461),(127.0.0.1/127.0.0.1:38555:38555)] 2024-11-18T20:24:13,794 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:24:13,794 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:24:13,794 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:24:13,794 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
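
The WAL configuration echoed above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) comes from region server settings. A minimal sketch of how a test might pin those values, assuming the usual keys hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs (class and method names below are purely illustrative, not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static Configuration walRollConfig() {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; the log above reports blocksize=256 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll when the WAL reaches blocksize * multiplier (0.5 gives the rollsize=128 MB above).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Maximum number of WAL files kept before forcing flushes; maxLogs=32 above.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }
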
2024-11-18T20:24:13,794 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:24:13,795 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:13,795 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:24:13,795 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:24:13,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:24:13,797 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:24:13,797 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:13,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:24:13,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:24:13,798 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:13,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:24:13,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:24:13,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:13,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:24:13,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:24:13,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
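
The CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2) reflect the per-store compaction settings. A rough sketch of setting them explicitly, assuming the standard keys hbase.hstore.compaction.min/.max/.ratio (names of the class and method are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration compactionConfig() {
        Configuration conf = HBaseConfiguration.create();
        // Corresponds to minFilesToCompact:3 in the CompactionConfiguration lines above.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Corresponds to maxFilesToCompact:10.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Corresponds to ratio 1.2 used by the exploring compaction policy.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        return conf;
      }
    }
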
2024-11-18T20:24:13,800 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:24:13,801 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740 2024-11-18T20:24:13,802 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740 2024-11-18T20:24:13,803 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:24:13,803 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:24:13,804 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:24:13,805 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:24:13,806 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707406, jitterRate=-0.10048732161521912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:24:13,806 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:24:13,807 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961453795Writing region info on filesystem at 1731961453795Initializing all the Stores at 1731961453796 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961453796Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961453796Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961453796Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961453796Cleaning up temporary data from old regions at 1731961453803 (+7 ms)Running coprocessor post-open hooks at 1731961453806 (+3 ms)Region opened successfully at 1731961453807 (+1 ms) 2024-11-18T20:24:13,808 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961453774 2024-11-18T20:24:13,810 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:24:13,810 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:24:13,811 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,812 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,37687,1731961452840, state=OPEN 2024-11-18T20:24:13,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:24:13,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:24:13,840 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:13,840 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:13,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:13,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:13,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:24:13,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,37687,1731961452840 in 222 msec 2024-11-18T20:24:13,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:24:13,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 682 msec 2024-11-18T20:24:13,848 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:13,848 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:24:13,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:24:13,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,37687,1731961452840, seqNum=-1] 2024-11-18T20:24:13,850 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:24:13,851 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39799, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:24:13,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, 
state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 728 msec 2024-11-18T20:24:13,856 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961453856, completionTime=-1 2024-11-18T20:24:13,857 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:24:13,857 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:24:13,858 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:24:13,858 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961513858 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961573859 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44881,1731961452670-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44881,1731961452670-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44881,1731961452670-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5a964fc427ed:44881, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,859 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,861 DEBUG [master/5a964fc427ed:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.965sec 2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
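
The "Chore ScheduledChore name=..., period=..., unit=... is enabled" lines above are emitted by ChoreService when a chore is scheduled. A rough sketch of defining and scheduling one, assuming the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore signatures as I recall them (everything below is illustrative, not taken from this test):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Period is in milliseconds by default, matching "unit=MILLISECONDS" above.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            // Periodic work goes here; the real chores above flush memstores, clean files, etc.
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore); // ChoreService logs the "is enabled" line at this point
        // ... later: service.shutdown();
      }
    }
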
2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44881,1731961452670-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:24:13,864 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44881,1731961452670-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:24:13,867 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:24:13,867 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:24:13,867 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44881,1731961452670-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:13,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:13,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@94ae242, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:24:13,964 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5a964fc427ed,44881,-1 for getting cluster id 2024-11-18T20:24:13,965 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:24:13,966 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '270199b4-bc34-420f-9e09-bc07c2311e27' 2024-11-18T20:24:13,966 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:24:13,966 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "270199b4-bc34-420f-9e09-bc07c2311e27" 2024-11-18T20:24:13,967 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5213b03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:24:13,967 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5a964fc427ed,44881,-1] 2024-11-18T20:24:13,967 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:24:13,967 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:13,968 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:24:13,970 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a437f25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:24:13,970 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:24:13,971 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,37687,1731961452840, seqNum=-1] 2024-11-18T20:24:13,972 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:24:13,973 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60200, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:24:13,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5a964fc427ed,44881,1731961452670 2024-11-18T20:24:13,975 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:13,979 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:24:13,979 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:24:13,980 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 5a964fc427ed,44881,1731961452670 2024-11-18T20:24:13,980 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@31bb9800 2024-11-18T20:24:13,980 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:24:13,982 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43546, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:24:13,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44881 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:24:13,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44881 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T20:24:13,983 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44881 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:24:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44881 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-18T20:24:13,986 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:24:13,986 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:13,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44881 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-18T20:24:13,987 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:24:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44881 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:24:13,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741835_1011 (size=381) 2024-11-18T20:24:13,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741835_1011 (size=381) 2024-11-18T20:24:13,996 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3081dea91b057915b834d5b83e904f00, NAME => 'TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af 2024-11-18T20:24:14,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741836_1012 (size=64) 2024-11-18T20:24:14,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741836_1012 (size=64) 2024-11-18T20:24:14,002 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:14,003 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 3081dea91b057915b834d5b83e904f00, disabling compactions & flushes 2024-11-18T20:24:14,003 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:14,003 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:14,003 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. after waiting 0 ms 2024-11-18T20:24:14,003 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:14,003 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
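
The create request logged above ('TestLogRolling-testLogRolling' with a single 'info' family, VERSIONS => '1', MAX_FILESIZE 786432, MEMSTORE_FLUSHSIZE 8192) corresponds to a client-side Admin.createTable call; the tiny file and flush sizes are what trigger the TableDescriptorChecker warnings. A minimal sketch, assuming the standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API and a Connection obtained elsewhere (class and method names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void createTestTable(Connection connection) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)        // VERSIONS => '1' in the descriptor above
                .build())
            .setMaxFileSize(786432)       // triggers the MAX_FILESIZE warning above
            .setMemStoreFlushSize(8192)   // triggers the MEMSTORE_FLUSHSIZE warning above
            .build();
        try (Admin admin = connection.getAdmin()) {
          admin.createTable(td);          // drives the CreateTableProcedure (pid=4) above
        }
      }
    }
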
2024-11-18T20:24:14,003 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3081dea91b057915b834d5b83e904f00: Waiting for close lock at 1731961454003Disabling compacts and flushes for region at 1731961454003Disabling writes for close at 1731961454003Writing region close event to WAL at 1731961454003Closed at 1731961454003 2024-11-18T20:24:14,004 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:24:14,004 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731961454004"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961454004"}]},"ts":"1731961454004"} 2024-11-18T20:24:14,007 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-18T20:24:14,008 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:24:14,008 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961454008"}]},"ts":"1731961454008"} 2024-11-18T20:24:14,010 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-18T20:24:14,011 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, ASSIGN}] 2024-11-18T20:24:14,012 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, ASSIGN 2024-11-18T20:24:14,013 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, ASSIGN; state=OFFLINE, location=5a964fc427ed,37687,1731961452840; forceNewPlan=false, retain=false 2024-11-18T20:24:14,164 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3081dea91b057915b834d5b83e904f00, regionState=OPENING, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:14,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, ASSIGN because future has completed 2024-11-18T20:24:14,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3081dea91b057915b834d5b83e904f00, 
server=5a964fc427ed,37687,1731961452840}] 2024-11-18T20:24:14,327 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:14,327 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3081dea91b057915b834d5b83e904f00, NAME => 'TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:24:14,327 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,328 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:14,328 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,328 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,330 INFO [StoreOpener-3081dea91b057915b834d5b83e904f00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,332 INFO [StoreOpener-3081dea91b057915b834d5b83e904f00-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3081dea91b057915b834d5b83e904f00 columnFamilyName info 2024-11-18T20:24:14,332 DEBUG [StoreOpener-3081dea91b057915b834d5b83e904f00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:14,333 INFO [StoreOpener-3081dea91b057915b834d5b83e904f00-1 {}] regionserver.HStore(327): Store=3081dea91b057915b834d5b83e904f00/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:14,333 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,335 DEBUG 
[RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,335 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,336 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,336 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,339 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,342 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:24:14,343 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3081dea91b057915b834d5b83e904f00; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692698, jitterRate=-0.11918924748897552}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:24:14,343 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:14,344 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3081dea91b057915b834d5b83e904f00: Running coprocessor pre-open hook at 1731961454328Writing region info on filesystem at 1731961454328Initializing all the Stores at 1731961454329 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961454330 (+1 ms)Cleaning up temporary data from old regions at 1731961454336 (+6 ms)Running coprocessor post-open hooks at 1731961454343 (+7 ms)Region opened successfully at 1731961454344 (+1 ms) 2024-11-18T20:24:14,345 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., pid=6, masterSystemTime=1731961454319 2024-11-18T20:24:14,348 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, 
pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:14,348 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:14,350 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3081dea91b057915b834d5b83e904f00, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:14,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3081dea91b057915b834d5b83e904f00, server=5a964fc427ed,37687,1731961452840 because future has completed 2024-11-18T20:24:14,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:24:14,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3081dea91b057915b834d5b83e904f00, server=5a964fc427ed,37687,1731961452840 in 189 msec 2024-11-18T20:24:14,361 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:24:14,361 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, ASSIGN in 348 msec 2024-11-18T20:24:14,362 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:24:14,362 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961454362"}]},"ts":"1731961454362"} 2024-11-18T20:24:14,365 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-18T20:24:14,366 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:24:14,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 383 msec 2024-11-18T20:24:14,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:14,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:15,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,688 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:24:15,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:15,725 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-18T20:24:15,725 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T20:24:15,725 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:24:15,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:15,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:16,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:16,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:17,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:17,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:18,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:18,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:19,211 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:24:19,212 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-18T20:24:19,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:19,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:20,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:20,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:21,230 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:24:21,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:21,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:21,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:22,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:22,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:23,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:23,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:24,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44881 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:24:24,055 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-18T20:24:24,055 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-18T20:24:24,059 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-18T20:24:24,059 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
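[Editor's note: the entries above show the client side of table creation finishing — the admin polling "is procedure done pid=4", the CREATE operation completing, and META being scanned to find the single region. As a minimal, purely illustrative sketch (not taken from this test; class name and the use of a single "info" column family are assumptions based on the flush entries that follow), the standard HBase Admin API path that produces this kind of log output would look roughly like this:]

```java
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Hypothetical example class; assumes hbase-client on the classpath and a reachable cluster.
public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Single "info" column family, matching the store seen in the later flush entries.
      admin.createTable(TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
      // createTable returns only after the master reports the create procedure as done,
      // which corresponds to the "Operation: CREATE ... completed" line in the log.
      List<RegionInfo> regions = admin.getRegions(table);
      System.out.println("regions for " + table + ": " + regions.size());
    }
  }
}
```

[The blocking createTable call is what drives the repeated "Checking to see if procedure is done pid=4" entries: the client keeps asking the master for the procedure result until it reports SUCCESS, then looks up the table's regions in hbase:meta, yielding the "Found 1 regions for table" line. End of editor's note.]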
2024-11-18T20:24:24,061 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., hostname=5a964fc427ed,37687,1731961452840, seqNum=2] 2024-11-18T20:24:24,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:24,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:24:24,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/978d6aad42154c539030ce8b39ba8506 is 1080, key is row0001/info:/1731961464062/Put/seqid=0 2024-11-18T20:24:24,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741837_1013 (size=12509) 2024-11-18T20:24:24,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741837_1013 (size=12509) 2024-11-18T20:24:24,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/978d6aad42154c539030ce8b39ba8506 2024-11-18T20:24:24,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/978d6aad42154c539030ce8b39ba8506 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/978d6aad42154c539030ce8b39ba8506 2024-11-18T20:24:24,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/978d6aad42154c539030ce8b39ba8506, entries=7, sequenceid=11, filesize=12.2 K 2024-11-18T20:24:24,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 3081dea91b057915b834d5b83e904f00 in 50ms, sequenceid=11, compaction requested=false 2024-11-18T20:24:24,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:24,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:24,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-18T20:24:24,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/c965756ffac7438c96051ebaa54179f5 is 1080, key is row0008/info:/1731961464080/Put/seqid=0 2024-11-18T20:24:24,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741838_1014 (size=29761) 2024-11-18T20:24:24,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741838_1014 (size=29761) 2024-11-18T20:24:24,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/c965756ffac7438c96051ebaa54179f5 2024-11-18T20:24:24,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/c965756ffac7438c96051ebaa54179f5 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5 2024-11-18T20:24:24,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5, entries=23, sequenceid=37, filesize=29.1 K 2024-11-18T20:24:24,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 3081dea91b057915b834d5b83e904f00 in 24ms, sequenceid=37, compaction requested=false 2024-11-18T20:24:24,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:24,153 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-18T20:24:24,153 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:24,153 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5 because midkey is the same as first or last row 2024-11-18T20:24:24,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:24,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:25,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:25,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:26,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:24:26,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/e12e426c89e74c798ed4fb54d6683177 is 1080, key is row0031/info:/1731961464130/Put/seqid=0 2024-11-18T20:24:26,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741839_1015 (size=12509) 2024-11-18T20:24:26,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741839_1015 (size=12509) 2024-11-18T20:24:26,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/e12e426c89e74c798ed4fb54d6683177 2024-11-18T20:24:26,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/e12e426c89e74c798ed4fb54d6683177 as 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/e12e426c89e74c798ed4fb54d6683177 2024-11-18T20:24:26,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/e12e426c89e74c798ed4fb54d6683177, entries=7, sequenceid=47, filesize=12.2 K 2024-11-18T20:24:26,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 3081dea91b057915b834d5b83e904f00 in 28ms, sequenceid=47, compaction requested=true 2024-11-18T20:24:26,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:26,178 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,178 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,178 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5 because midkey is the same as first or last row 2024-11-18T20:24:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:26,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3081dea91b057915b834d5b83e904f00:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:26,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:26,178 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:26,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T20:24:26,180 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:26,180 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 3081dea91b057915b834d5b83e904f00/info is initiating minor compaction (all files) 2024-11-18T20:24:26,180 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3081dea91b057915b834d5b83e904f00/info in TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
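As a quick check of the split-policy lines above: the three store files reported by the block manager (12509 + 29761 + 12509 bytes = 54779 bytes, about 53.5 K, the same 54779 the compaction policy selects) exceed the logged 16.0 K check size, so the policy says "should split", yet the split is rejected because the chosen midkey equals the file's first or last row. A rough, self-contained sketch of that size comparison, not HBase's actual implementation; the file sizes and the 16 K figure come only from the log above:

    // Rough sketch of the size-based split check reported in the log:
    // the region "should split" once the summed store file size exceeds
    // the configured check size (16.0 K here). Not HBase's real code.
    public class SplitSizeCheckDemo {
        public static void main(String[] args) {
            long[] storeFileSizes = {12509L, 29761L, 12509L}; // bytes, from the block reports above
            long sizeToCheck = 16 * 1024;                     // 16.0 K, as logged

            long sumSize = 0;
            for (long s : storeFileSizes) {
                sumSize += s;
            }

            System.out.printf("sumSize=%.1f K, sizeToCheck=%.1f K%n",
                    sumSize / 1024.0, sizeToCheck / 1024.0);
            System.out.println("should split (by size): " + (sumSize > sizeToCheck));
            // The log still says "cannot split" because the candidate split point
            // (the midkey) equals the file's first or last row.
        }
    }

Running this prints sumSize=53.5 K against sizeToCheck=16.0 K, matching the logged values.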
2024-11-18T20:24:26,180 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/978d6aad42154c539030ce8b39ba8506, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/e12e426c89e74c798ed4fb54d6683177] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp, totalSize=53.5 K 2024-11-18T20:24:26,181 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 978d6aad42154c539030ce8b39ba8506, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731961464062 2024-11-18T20:24:26,181 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting c965756ffac7438c96051ebaa54179f5, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731961464080 2024-11-18T20:24:26,181 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting e12e426c89e74c798ed4fb54d6683177, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731961464130 2024-11-18T20:24:26,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/b1e644e0370741b1baaaea92d0cb6339 is 1080, key is row0038/info:/1731961466151/Put/seqid=0 2024-11-18T20:24:26,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741840_1016 (size=16817) 2024-11-18T20:24:26,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741840_1016 (size=16817) 2024-11-18T20:24:26,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/b1e644e0370741b1baaaea92d0cb6339 2024-11-18T20:24:26,199 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3081dea91b057915b834d5b83e904f00#info#compaction#59 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:26,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/b1e644e0370741b1baaaea92d0cb6339 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/b1e644e0370741b1baaaea92d0cb6339 2024-11-18T20:24:26,200 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/18b6c82e88d447ccb4008b94de9f53ab is 1080, key is row0001/info:/1731961464062/Put/seqid=0 2024-11-18T20:24:26,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741841_1017 (size=44978) 2024-11-18T20:24:26,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741841_1017 (size=44978) 2024-11-18T20:24:26,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/b1e644e0370741b1baaaea92d0cb6339, entries=11, sequenceid=61, filesize=16.4 K 2024-11-18T20:24:26,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for 3081dea91b057915b834d5b83e904f00 in 30ms, sequenceid=61, compaction requested=false 2024-11-18T20:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.9 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5 because midkey is the same as first or last row 2024-11-18T20:24:26,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:26,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:24:26,215 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/18b6c82e88d447ccb4008b94de9f53ab as 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab 2024-11-18T20:24:26,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/0b9097f8a8d3419eada6fa71f749bf93 is 1080, key is row0049/info:/1731961466180/Put/seqid=0 2024-11-18T20:24:26,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741842_1018 (size=17894) 2024-11-18T20:24:26,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741842_1018 (size=17894) 2024-11-18T20:24:26,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/0b9097f8a8d3419eada6fa71f749bf93 2024-11-18T20:24:26,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/0b9097f8a8d3419eada6fa71f749bf93 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/0b9097f8a8d3419eada6fa71f749bf93 2024-11-18T20:24:26,234 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3081dea91b057915b834d5b83e904f00/info of 3081dea91b057915b834d5b83e904f00 into 18b6c82e88d447ccb4008b94de9f53ab(size=43.9 K), total size for store is 60.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:24:26,234 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:26,234 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., storeName=3081dea91b057915b834d5b83e904f00/info, priority=13, startTime=1731961466178; duration=0sec 2024-11-18T20:24:26,234 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,234 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,234 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab because midkey is the same as first or last row 2024-11-18T20:24:26,234 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,234 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,234 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab because midkey is the same as first or last row 2024-11-18T20:24:26,235 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,235 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,235 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab because midkey is the same as first or last row 2024-11-18T20:24:26,235 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:26,235 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3081dea91b057915b834d5b83e904f00:info 2024-11-18T20:24:26,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/0b9097f8a8d3419eada6fa71f749bf93, entries=12, sequenceid=76, filesize=17.5 K 2024-11-18T20:24:26,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished 
flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=4.20 KB/4304 for 3081dea91b057915b834d5b83e904f00 in 31ms, sequenceid=76, compaction requested=true 2024-11-18T20:24:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab because midkey is the same as first or last row 2024-11-18T20:24:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3081dea91b057915b834d5b83e904f00:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:26,240 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:26,241 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 79689 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:26,241 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 3081dea91b057915b834d5b83e904f00/info is initiating minor compaction (all files) 2024-11-18T20:24:26,242 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3081dea91b057915b834d5b83e904f00/info in TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
2024-11-18T20:24:26,242 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/b1e644e0370741b1baaaea92d0cb6339, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/0b9097f8a8d3419eada6fa71f749bf93] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp, totalSize=77.8 K 2024-11-18T20:24:26,242 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 18b6c82e88d447ccb4008b94de9f53ab, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731961464062 2024-11-18T20:24:26,242 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting b1e644e0370741b1baaaea92d0cb6339, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1731961466151 2024-11-18T20:24:26,243 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b9097f8a8d3419eada6fa71f749bf93, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731961466180 2024-11-18T20:24:26,253 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3081dea91b057915b834d5b83e904f00#info#compaction#61 average throughput is 30.78 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:26,254 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/1011320473f44f9296269365d506fa10 is 1080, key is row0001/info:/1731961464062/Put/seqid=0 2024-11-18T20:24:26,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741843_1019 (size=69920) 2024-11-18T20:24:26,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741843_1019 (size=69920) 2024-11-18T20:24:26,266 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/1011320473f44f9296269365d506fa10 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 2024-11-18T20:24:26,272 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3081dea91b057915b834d5b83e904f00/info of 3081dea91b057915b834d5b83e904f00 into 1011320473f44f9296269365d506fa10(size=68.3 K), total size for store is 68.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:24:26,272 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:26,272 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., storeName=3081dea91b057915b834d5b83e904f00/info, priority=13, startTime=1731961466240; duration=0sec 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.3 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 because midkey is the same as first or last row 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.3 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 because midkey is the same as first or last row 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.3 K, sizeToCheck=16.0 K 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 because midkey is the same as first or last row 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:26,273 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3081dea91b057915b834d5b83e904f00:info 2024-11-18T20:24:26,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:26,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:27,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:27,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:28,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:24:28,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/4bd68e11a35e4040bc47f62b9ed5bacf is 1080, key is row0061/info:/1731961466211/Put/seqid=0 2024-11-18T20:24:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741844_1020 (size=12509) 2024-11-18T20:24:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741844_1020 (size=12509) 2024-11-18T20:24:28,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/4bd68e11a35e4040bc47f62b9ed5bacf 2024-11-18T20:24:28,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/4bd68e11a35e4040bc47f62b9ed5bacf as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/4bd68e11a35e4040bc47f62b9ed5bacf 2024-11-18T20:24:28,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/4bd68e11a35e4040bc47f62b9ed5bacf, entries=7, sequenceid=88, filesize=12.2 K 2024-11-18T20:24:28,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 3081dea91b057915b834d5b83e904f00 in 28ms, sequenceid=88, compaction requested=false 2024-11-18T20:24:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=80.5 K, sizeToCheck=16.0 K 2024-11-18T20:24:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:28,256 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 because midkey is the same as first or last row 2024-11-18T20:24:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T20:24:28,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/9c13a4da62684ff2a5e76b0ccadb8770 is 1080, key is row0068/info:/1731961468229/Put/seqid=0 2024-11-18T20:24:28,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741845_1021 (size=18987) 2024-11-18T20:24:28,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741845_1021 (size=18987) 2024-11-18T20:24:28,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/9c13a4da62684ff2a5e76b0ccadb8770 2024-11-18T20:24:28,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/9c13a4da62684ff2a5e76b0ccadb8770 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/9c13a4da62684ff2a5e76b0ccadb8770 2024-11-18T20:24:28,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/9c13a4da62684ff2a5e76b0ccadb8770, entries=13, sequenceid=104, filesize=18.5 K 2024-11-18T20:24:28,281 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 3081dea91b057915b834d5b83e904f00 in 24ms, sequenceid=104, compaction requested=true 2024-11-18T20:24:28,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:28,281 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=99.0 K, sizeToCheck=16.0 K 2024-11-18T20:24:28,281 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:28,281 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 because midkey is the same as first or last row 2024-11-18T20:24:28,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3081dea91b057915b834d5b83e904f00:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:28,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:28,281 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:28,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T20:24:28,283 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101416 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:28,283 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 3081dea91b057915b834d5b83e904f00/info is initiating minor compaction (all files) 2024-11-18T20:24:28,283 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3081dea91b057915b834d5b83e904f00/info in TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
2024-11-18T20:24:28,283 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/4bd68e11a35e4040bc47f62b9ed5bacf, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/9c13a4da62684ff2a5e76b0ccadb8770] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp, totalSize=99.0 K 2024-11-18T20:24:28,284 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1011320473f44f9296269365d506fa10, keycount=60, bloomtype=ROW, size=68.3 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731961464062 2024-11-18T20:24:28,284 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4bd68e11a35e4040bc47f62b9ed5bacf, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1731961466211 2024-11-18T20:24:28,284 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c13a4da62684ff2a5e76b0ccadb8770, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1731961468229 2024-11-18T20:24:28,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/562355dbfce544b192d5d64c60e786a7 is 1080, key is row0081/info:/1731961468259/Put/seqid=0 2024-11-18T20:24:28,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741846_1022 (size=16817) 2024-11-18T20:24:28,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741846_1022 (size=16817) 2024-11-18T20:24:28,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/562355dbfce544b192d5d64c60e786a7 2024-11-18T20:24:28,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/562355dbfce544b192d5d64c60e786a7 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/562355dbfce544b192d5d64c60e786a7 2024-11-18T20:24:28,299 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3081dea91b057915b834d5b83e904f00#info#compaction#65 average throughput is 41.05 MB/second, slept 0 time(s) and total slept time 
is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:28,299 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/1c90ebfc69b44c91b9d7685cc7a4548b is 1080, key is row0001/info:/1731961464062/Put/seqid=0 2024-11-18T20:24:28,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741847_1023 (size=91639) 2024-11-18T20:24:28,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741847_1023 (size=91639) 2024-11-18T20:24:28,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/562355dbfce544b192d5d64c60e786a7, entries=11, sequenceid=118, filesize=16.4 K 2024-11-18T20:24:28,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=5.25 KB/5380 for 3081dea91b057915b834d5b83e904f00 in 24ms, sequenceid=118, compaction requested=false 2024-11-18T20:24:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=115.5 K, sizeToCheck=16.0 K 2024-11-18T20:24:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:28,305 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 because midkey is the same as first or last row 2024-11-18T20:24:28,308 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/1c90ebfc69b44c91b9d7685cc7a4548b as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b 2024-11-18T20:24:28,314 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3081dea91b057915b834d5b83e904f00/info of 3081dea91b057915b834d5b83e904f00 into 1c90ebfc69b44c91b9d7685cc7a4548b(size=89.5 K), total size for store is 105.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:24:28,314 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3081dea91b057915b834d5b83e904f00: 2024-11-18T20:24:28,314 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., storeName=3081dea91b057915b834d5b83e904f00/info, priority=13, startTime=1731961468281; duration=0sec 2024-11-18T20:24:28,314 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-11-18T20:24:28,314 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:28,314 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-11-18T20:24:28,314 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:28,314 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-11-18T20:24:28,314 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:24:28,315 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:28,315 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:28,315 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3081dea91b057915b834d5b83e904f00:info 2024-11-18T20:24:28,316 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44881 {}] assignment.AssignmentManager(1355): Split request from 5a964fc427ed,37687,1731961452840, parent={ENCODED => 3081dea91b057915b834d5b83e904f00, NAME => 'TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T20:24:28,321 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44881 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:28,324 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44881 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3081dea91b057915b834d5b83e904f00, daughterA=b1ea4d159b7999e1e5347a6755cf09c6, daughterB=49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:28,325 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3081dea91b057915b834d5b83e904f00, 
daughterA=b1ea4d159b7999e1e5347a6755cf09c6, daughterB=49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:28,325 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3081dea91b057915b834d5b83e904f00, daughterA=b1ea4d159b7999e1e5347a6755cf09c6, daughterB=49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:28,325 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3081dea91b057915b834d5b83e904f00, daughterA=b1ea4d159b7999e1e5347a6755cf09c6, daughterB=49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:28,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, UNASSIGN}] 2024-11-18T20:24:28,332 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, UNASSIGN 2024-11-18T20:24:28,334 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3081dea91b057915b834d5b83e904f00, regionState=CLOSING, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:28,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, UNASSIGN because future has completed 2024-11-18T20:24:28,336 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-18T20:24:28,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3081dea91b057915b834d5b83e904f00, server=5a964fc427ed,37687,1731961452840}] 2024-11-18T20:24:28,493 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,493 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-18T20:24:28,493 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 3081dea91b057915b834d5b83e904f00, disabling compactions & flushes 2024-11-18T20:24:28,493 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:28,493 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
2024-11-18T20:24:28,493 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. after waiting 0 ms 2024-11-18T20:24:28,493 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:28,494 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 3081dea91b057915b834d5b83e904f00 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-18T20:24:28,498 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/de5ce08ef7c642cda0860d666a977562 is 1080, key is row0092/info:/1731961468283/Put/seqid=0 2024-11-18T20:24:28,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741848_1024 (size=10347) 2024-11-18T20:24:28,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741848_1024 (size=10347) 2024-11-18T20:24:28,505 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/de5ce08ef7c642cda0860d666a977562 2024-11-18T20:24:28,511 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/.tmp/info/de5ce08ef7c642cda0860d666a977562 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/de5ce08ef7c642cda0860d666a977562 2024-11-18T20:24:28,517 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/de5ce08ef7c642cda0860d666a977562, entries=5, sequenceid=127, filesize=10.1 K 2024-11-18T20:24:28,519 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 3081dea91b057915b834d5b83e904f00 in 24ms, sequenceid=127, compaction requested=true 2024-11-18T20:24:28,521 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/978d6aad42154c539030ce8b39ba8506, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/e12e426c89e74c798ed4fb54d6683177, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/b1e644e0370741b1baaaea92d0cb6339, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/0b9097f8a8d3419eada6fa71f749bf93, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/4bd68e11a35e4040bc47f62b9ed5bacf, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/9c13a4da62684ff2a5e76b0ccadb8770] to archive 2024-11-18T20:24:28,522 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T20:24:28,525 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/978d6aad42154c539030ce8b39ba8506 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/978d6aad42154c539030ce8b39ba8506 2024-11-18T20:24:28,527 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/c965756ffac7438c96051ebaa54179f5 2024-11-18T20:24:28,529 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/18b6c82e88d447ccb4008b94de9f53ab 2024-11-18T20:24:28,531 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/e12e426c89e74c798ed4fb54d6683177 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/e12e426c89e74c798ed4fb54d6683177 2024-11-18T20:24:28,533 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/b1e644e0370741b1baaaea92d0cb6339 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/b1e644e0370741b1baaaea92d0cb6339 2024-11-18T20:24:28,536 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 to 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1011320473f44f9296269365d506fa10 2024-11-18T20:24:28,538 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/0b9097f8a8d3419eada6fa71f749bf93 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/0b9097f8a8d3419eada6fa71f749bf93 2024-11-18T20:24:28,541 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/4bd68e11a35e4040bc47f62b9ed5bacf to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/4bd68e11a35e4040bc47f62b9ed5bacf 2024-11-18T20:24:28,543 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/9c13a4da62684ff2a5e76b0ccadb8770 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/9c13a4da62684ff2a5e76b0ccadb8770 2024-11-18T20:24:28,572 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-11-18T20:24:28,573 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 2024-11-18T20:24:28,573 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 3081dea91b057915b834d5b83e904f00: Waiting for close lock at 1731961468493Running coprocessor pre-close hooks at 1731961468493Disabling compacts and flushes for region at 1731961468493Disabling writes for close at 1731961468493Obtaining lock to block concurrent updates at 1731961468494 (+1 ms)Preparing flush snapshotting stores in 3081dea91b057915b834d5b83e904f00 at 1731961468494Finished memstore snapshotting TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., syncing WAL and waiting on mvcc, flushsize=dataSize=5380, getHeapSize=6000, getOffHeapSize=0, getCellsCount=5 at 1731961468494Flushing stores of TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
at 1731961468495 (+1 ms)Flushing 3081dea91b057915b834d5b83e904f00/info: creating writer at 1731961468495Flushing 3081dea91b057915b834d5b83e904f00/info: appending metadata at 1731961468498 (+3 ms)Flushing 3081dea91b057915b834d5b83e904f00/info: closing flushed file at 1731961468498Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67e1939: reopening flushed file at 1731961468510 (+12 ms)Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 3081dea91b057915b834d5b83e904f00 in 24ms, sequenceid=127, compaction requested=true at 1731961468519 (+9 ms)Writing region close event to WAL at 1731961468552 (+33 ms)Running coprocessor post-close hooks at 1731961468573 (+21 ms)Closed at 1731961468573 2024-11-18T20:24:28,579 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,580 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3081dea91b057915b834d5b83e904f00, regionState=CLOSED 2024-11-18T20:24:28,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3081dea91b057915b834d5b83e904f00, server=5a964fc427ed,37687,1731961452840 because future has completed 2024-11-18T20:24:28,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-18T20:24:28,589 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 3081dea91b057915b834d5b83e904f00, server=5a964fc427ed,37687,1731961452840 in 249 msec 2024-11-18T20:24:28,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T20:24:28,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3081dea91b057915b834d5b83e904f00, UNASSIGN in 258 msec 2024-11-18T20:24:28,614 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:28,622 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=3081dea91b057915b834d5b83e904f00, threads=3 2024-11-18T20:24:28,627 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/562355dbfce544b192d5d64c60e786a7 for region: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,627 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/de5ce08ef7c642cda0860d666a977562 for region: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,627 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b for region: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,654 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/562355dbfce544b192d5d64c60e786a7, top=true 2024-11-18T20:24:28,658 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/de5ce08ef7c642cda0860d666a977562, top=true 2024-11-18T20:24:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741849_1025 (size=27) 2024-11-18T20:24:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741849_1025 (size=27) 2024-11-18T20:24:28,700 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7 for child: 49ba49f4352eb74e2d81daec4da3f4b0, parent: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,700 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/562355dbfce544b192d5d64c60e786a7 for region: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,715 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562 for child: 49ba49f4352eb74e2d81daec4da3f4b0, parent: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,715 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/de5ce08ef7c642cda0860d666a977562 for region: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741850_1026 (size=27) 2024-11-18T20:24:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741850_1026 (size=27) 2024-11-18T20:24:28,718 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b for region: 3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:28,720 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 3081dea91b057915b834d5b83e904f00 Daughter A: [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00] storefiles, Daughter B: [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562] storefiles. 2024-11-18T20:24:28,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741851_1027 (size=71) 2024-11-18T20:24:28,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741851_1027 (size=71) 2024-11-18T20:24:28,731 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:28,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741852_1028 (size=71) 2024-11-18T20:24:28,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741852_1028 (size=71) 2024-11-18T20:24:28,745 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:28,757 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-18T20:24:28,760 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-18T20:24:28,762 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731961468762"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731961468762"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731961468762"}]},"ts":"1731961468762"} 2024-11-18T20:24:28,763 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731961468762"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961468762"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731961468762"}]},"ts":"1731961468762"} 2024-11-18T20:24:28,763 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731961468762"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961468762"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731961468762"}]},"ts":"1731961468762"} 2024-11-18T20:24:28,785 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b1ea4d159b7999e1e5347a6755cf09c6, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ba49f4352eb74e2d81daec4da3f4b0, ASSIGN}] 2024-11-18T20:24:28,787 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b1ea4d159b7999e1e5347a6755cf09c6, ASSIGN 2024-11-18T20:24:28,788 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ba49f4352eb74e2d81daec4da3f4b0, ASSIGN 2024-11-18T20:24:28,789 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b1ea4d159b7999e1e5347a6755cf09c6, ASSIGN; state=SPLITTING_NEW, location=5a964fc427ed,37687,1731961452840; forceNewPlan=false, retain=false 2024-11-18T20:24:28,789 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ba49f4352eb74e2d81daec4da3f4b0, ASSIGN; state=SPLITTING_NEW, location=5a964fc427ed,37687,1731961452840; forceNewPlan=false, retain=false 2024-11-18T20:24:28,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:28,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:28,940 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=b1ea4d159b7999e1e5347a6755cf09c6, regionState=OPENING, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:28,940 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=49ba49f4352eb74e2d81daec4da3f4b0, regionState=OPENING, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:28,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b1ea4d159b7999e1e5347a6755cf09c6, ASSIGN because future has completed 2024-11-18T20:24:28,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1ea4d159b7999e1e5347a6755cf09c6, server=5a964fc427ed,37687,1731961452840}] 2024-11-18T20:24:28,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ba49f4352eb74e2d81daec4da3f4b0, ASSIGN because future has completed 2024-11-18T20:24:28,944 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49ba49f4352eb74e2d81daec4da3f4b0, server=5a964fc427ed,37687,1731961452840}] 2024-11-18T20:24:29,102 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 
2024-11-18T20:24:29,103 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 49ba49f4352eb74e2d81daec4da3f4b0, NAME => 'TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-18T20:24:29,103 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,103 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:29,103 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,103 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,105 INFO [StoreOpener-49ba49f4352eb74e2d81daec4da3f4b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,106 INFO [StoreOpener-49ba49f4352eb74e2d81daec4da3f4b0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49ba49f4352eb74e2d81daec4da3f4b0 columnFamilyName info 2024-11-18T20:24:29,106 DEBUG [StoreOpener-49ba49f4352eb74e2d81daec4da3f4b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:29,121 DEBUG [StoreOpener-49ba49f4352eb74e2d81daec4da3f4b0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00->hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b-top 2024-11-18T20:24:29,126 DEBUG [StoreOpener-49ba49f4352eb74e2d81daec4da3f4b0-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7 2024-11-18T20:24:29,130 DEBUG [StoreOpener-49ba49f4352eb74e2d81daec4da3f4b0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562 2024-11-18T20:24:29,130 INFO [StoreOpener-49ba49f4352eb74e2d81daec4da3f4b0-1 {}] regionserver.HStore(327): Store=49ba49f4352eb74e2d81daec4da3f4b0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:29,131 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,131 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,132 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,133 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,133 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,134 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,135 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 49ba49f4352eb74e2d81daec4da3f4b0; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844249, jitterRate=0.07351930439472198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:24:29,135 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:29,135 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 49ba49f4352eb74e2d81daec4da3f4b0: Running coprocessor pre-open hook at 1731961469103Writing region info on filesystem at 1731961469103Initializing all the Stores at 1731961469105 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961469105Cleaning up temporary data from old regions at 1731961469133 (+28 ms)Running coprocessor post-open hooks at 1731961469135 (+2 ms)Region opened successfully at 1731961469135 2024-11-18T20:24:29,136 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., pid=13, masterSystemTime=1731961469096 2024-11-18T20:24:29,137 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:29,137 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:29,137 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:29,138 INFO [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:29,138 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:29,138 INFO [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 
2024-11-18T20:24:29,139 INFO [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00->hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b-top, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=116.0 K 2024-11-18T20:24:29,139 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:29,139 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:29,139 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] compactions.Compactor(225): Compacting 1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00, keycount=40, bloomtype=ROW, size=89.5 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1731961464062 2024-11-18T20:24:29,139 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 
2024-11-18T20:24:29,139 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => b1ea4d159b7999e1e5347a6755cf09c6, NAME => 'TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-18T20:24:29,140 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,140 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731961468259 2024-11-18T20:24:29,140 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:29,140 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,140 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,140 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=49ba49f4352eb74e2d81daec4da3f4b0, regionState=OPEN, openSeqNum=131, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:29,140 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731961468283 2024-11-18T20:24:29,141 INFO [StoreOpener-b1ea4d159b7999e1e5347a6755cf09c6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,142 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-18T20:24:29,142 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-18T20:24:29,142 INFO [StoreOpener-b1ea4d159b7999e1e5347a6755cf09c6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b1ea4d159b7999e1e5347a6755cf09c6 columnFamilyName info 2024-11-18T20:24:29,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-18T20:24:29,142 DEBUG [StoreOpener-b1ea4d159b7999e1e5347a6755cf09c6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:29,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49ba49f4352eb74e2d81daec4da3f4b0, server=5a964fc427ed,37687,1731961452840 because future has completed 2024-11-18T20:24:29,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-18T20:24:29,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 49ba49f4352eb74e2d81daec4da3f4b0, server=5a964fc427ed,37687,1731961452840 in 200 msec 2024-11-18T20:24:29,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ba49f4352eb74e2d81daec4da3f4b0, ASSIGN in 362 msec 2024-11-18T20:24:29,157 DEBUG [StoreOpener-b1ea4d159b7999e1e5347a6755cf09c6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00->hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b-bottom 2024-11-18T20:24:29,158 INFO [StoreOpener-b1ea4d159b7999e1e5347a6755cf09c6-1 {}] regionserver.HStore(327): Store=b1ea4d159b7999e1e5347a6755cf09c6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:29,158 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,159 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,160 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/info/387b6b85d58e47b0b04e81fa8557aaae is 193, key is TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0./info:regioninfo/1731961469140/Put/seqid=0 2024-11-18T20:24:29,160 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,160 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,160 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,163 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,164 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened b1ea4d159b7999e1e5347a6755cf09c6; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856805, jitterRate=0.08948467671871185}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:24:29,164 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:29,164 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for b1ea4d159b7999e1e5347a6755cf09c6: Running coprocessor pre-open hook at 1731961469140Writing region info on filesystem at 1731961469140Initializing all the Stores at 1731961469140Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961469140Cleaning up temporary data from old regions at 1731961469160 (+20 ms)Running coprocessor post-open hooks at 1731961469164 (+4 ms)Region opened successfully at 1731961469164 2024-11-18T20:24:29,165 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6., pid=12, masterSystemTime=1731961469096 2024-11-18T20:24:29,165 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store b1ea4d159b7999e1e5347a6755cf09c6:info, priority=-2147483648, current under compaction store size is 2 2024-11-18T20:24:29,165 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:29,165 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-18T20:24:29,166 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:29,166 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): b1ea4d159b7999e1e5347a6755cf09c6/info is initiating minor compaction (all files) 2024-11-18T20:24:29,166 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b1ea4d159b7999e1e5347a6755cf09c6/info in TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:29,167 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00->hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b-bottom] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/.tmp, totalSize=89.5 K 2024-11-18T20:24:29,167 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00, keycount=40, bloomtype=ROW, size=89.5 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1731961464062 2024-11-18T20:24:29,168 DEBUG [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:29,168 INFO [RS_OPEN_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:29,168 INFO [RS:0;5a964fc427ed:37687-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#68 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:29,169 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/05e7644b891149dea616f123726808b4 is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:29,169 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=b1ea4d159b7999e1e5347a6755cf09c6, regionState=OPEN, openSeqNum=131, regionLocation=5a964fc427ed,37687,1731961452840 2024-11-18T20:24:29,169 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44881 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=5a964fc427ed,37687,1731961452840, table=TestLogRolling-testLogRolling, region=b1ea4d159b7999e1e5347a6755cf09c6. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-18T20:24:29,171 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1ea4d159b7999e1e5347a6755cf09c6, server=5a964fc427ed,37687,1731961452840 because future has completed 2024-11-18T20:24:29,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741853_1029 (size=9882) 2024-11-18T20:24:29,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741853_1029 (size=9882) 2024-11-18T20:24:29,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/info/387b6b85d58e47b0b04e81fa8557aaae 2024-11-18T20:24:29,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-18T20:24:29,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure b1ea4d159b7999e1e5347a6755cf09c6, server=5a964fc427ed,37687,1731961452840 in 233 msec 2024-11-18T20:24:29,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741854_1030 (size=42984) 2024-11-18T20:24:29,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741854_1030 (size=42984) 2024-11-18T20:24:29,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-18T20:24:29,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b1ea4d159b7999e1e5347a6755cf09c6, ASSIGN in 395 msec 2024-11-18T20:24:29,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3081dea91b057915b834d5b83e904f00, daughterA=b1ea4d159b7999e1e5347a6755cf09c6, daughterB=49ba49f4352eb74e2d81daec4da3f4b0 in 863 msec 
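The procedure entries above record the split of parent region 3081dea91b057915b834d5b83e904f00 into daughters b1ea4d159b7999e1e5347a6755cf09c6 and 49ba49f4352eb74e2d81daec4da3f4b0, with ProcedureExecutor reporting each pid as "Finished ... in N msec". As a minimal triage sketch (not part of the test output and not an HBase API; the class name and regex below are assumptions), a few lines of standard-library Java can pull the pid, state, and duration out of such lines:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper for skimming ProcedureExecutor "Finished pid=..." lines;
// it is plain java.util.regex over raw log text, not an HBase API.
public class ProcedureFinishScanner {
    // Matches e.g. "Finished pid=13, ppid=11, state=SUCCESS, ... in 200 msec"
    private static final Pattern FINISHED = Pattern.compile(
        "Finished pid=(\\d+).*?state=(\\w+).*?in (\\d+) msec");

    public static void main(String[] args) {
        // Abbreviated stand-ins for the log lines above (durations taken from the log).
        String[] sample = {
            "procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure ... in 200 msec",
            "procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure ... in 863 msec"
        };
        for (String line : sample) {
            Matcher m = FINISHED.matcher(line);
            if (m.find()) {
                System.out.printf("pid=%s state=%s took %s ms%n",
                    m.group(1), m.group(2), m.group(3));
            }
        }
    }
}

Run against the raw log, this kind of scan would print summaries such as "pid=13 state=SUCCESS took 200 ms" and "pid=7 state=SUCCESS took 863 ms", matching the durations reported above.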
2024-11-18T20:24:29,190 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b1ea4d159b7999e1e5347a6755cf09c6#info#compaction#69 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:29,191 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/.tmp/info/336edadf03b4433cab3c0fb8adeebf7e is 1080, key is row0001/info:/1731961464062/Put/seqid=0 2024-11-18T20:24:29,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741855_1031 (size=70862) 2024-11-18T20:24:29,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741855_1031 (size=70862) 2024-11-18T20:24:29,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/ns/08079887732847f685f87674808feefd is 43, key is default/ns:d/1731961453852/Put/seqid=0 2024-11-18T20:24:29,203 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/.tmp/info/336edadf03b4433cab3c0fb8adeebf7e as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/info/336edadf03b4433cab3c0fb8adeebf7e 2024-11-18T20:24:29,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741856_1032 (size=5153) 2024-11-18T20:24:29,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741856_1032 (size=5153) 2024-11-18T20:24:29,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/ns/08079887732847f685f87674808feefd 2024-11-18T20:24:29,210 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in b1ea4d159b7999e1e5347a6755cf09c6/info of b1ea4d159b7999e1e5347a6755cf09c6 into 336edadf03b4433cab3c0fb8adeebf7e(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
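From 20:24:29,856 onward the section below is dominated by Close-WAL-Writer-0 retrying lease recovery for two WAL files roughly once per second, each attempt logging the same "java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" stack trace. One way to skim such output is to collapse the repeats into per-file counts; the sketch below does this with only the standard library (the class name is hypothetical and the sample paths are abbreviated stand-ins for the full hdfs:// paths in the log):

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical deduplicator for the recurring "Failed invocation for <path>"
// warnings: groups by WAL path and counts repeats so the retry loop reads as
// one summary line per file instead of a full stack trace each second.
public class WalRecoveryWarningSummary {
    public static void main(String[] args) {
        String[] warnings = {
            "Failed invocation for hdfs://localhost:33409/.../5a964fc427ed%2C35553%2C1731961316245.1731961316487",
            "Failed invocation for hdfs://localhost:33409/.../5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta",
            "Failed invocation for hdfs://localhost:33409/.../5a964fc427ed%2C35553%2C1731961316245.1731961316487"
        };
        Map<String, Integer> counts = new LinkedHashMap<>();
        for (String w : warnings) {
            String path = w.substring(w.indexOf("hdfs://"));
            counts.merge(path, 1, Integer::sum);
        }
        counts.forEach((path, n) ->
            System.out.println(n + "x lease-recovery retries for " + path));
    }
}

Grouping by path rather than by full message keeps the two files (the 35553 WAL and the 38299 meta WAL) distinguishable while hiding the repeated, identical stack traces.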
2024-11-18T20:24:29,210 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b1ea4d159b7999e1e5347a6755cf09c6: 2024-11-18T20:24:29,210 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6., storeName=b1ea4d159b7999e1e5347a6755cf09c6/info, priority=15, startTime=1731961469165; duration=0sec 2024-11-18T20:24:29,211 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:29,211 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b1ea4d159b7999e1e5347a6755cf09c6:info 2024-11-18T20:24:29,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/table/d6a4e9d92bad450d9db75499d2b685b5 is 65, key is TestLogRolling-testLogRolling/table:state/1731961454362/Put/seqid=0 2024-11-18T20:24:29,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741857_1033 (size=5340) 2024-11-18T20:24:29,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741857_1033 (size=5340) 2024-11-18T20:24:29,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/table/d6a4e9d92bad450d9db75499d2b685b5 2024-11-18T20:24:29,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/info/387b6b85d58e47b0b04e81fa8557aaae as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/info/387b6b85d58e47b0b04e81fa8557aaae 2024-11-18T20:24:29,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/info/387b6b85d58e47b0b04e81fa8557aaae, entries=30, sequenceid=17, filesize=9.7 K 2024-11-18T20:24:29,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/ns/08079887732847f685f87674808feefd as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/ns/08079887732847f685f87674808feefd 2024-11-18T20:24:29,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/ns/08079887732847f685f87674808feefd, entries=2, sequenceid=17, filesize=5.0 K 2024-11-18T20:24:29,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/table/d6a4e9d92bad450d9db75499d2b685b5 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/table/d6a4e9d92bad450d9db75499d2b685b5 2024-11-18T20:24:29,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/table/d6a4e9d92bad450d9db75499d2b685b5, entries=2, sequenceid=17, filesize=5.2 K 2024-11-18T20:24:29,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 116ms, sequenceid=17, compaction requested=false 2024-11-18T20:24:29,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T20:24:29,592 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/05e7644b891149dea616f123726808b4 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/05e7644b891149dea616f123726808b4 2024-11-18T20:24:29,600 INFO [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into 05e7644b891149dea616f123726808b4(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:24:29,600 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:29,600 INFO [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961469137; duration=0sec 2024-11-18T20:24:29,600 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:29,600 DEBUG [RS:0;5a964fc427ed:37687-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info 2024-11-18T20:24:29,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:29,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:30,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:60200 deadline: 1731961480296, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. is not online on 5a964fc427ed,37687,1731961452840 2024-11-18T20:24:30,323 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., hostname=5a964fc427ed,37687,1731961452840, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., hostname=5a964fc427ed,37687,1731961452840, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. is not online on 5a964fc427ed,37687,1731961452840 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:24:30,324 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., hostname=5a964fc427ed,37687,1731961452840, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00. 
is not online on 5a964fc427ed,37687,1731961452840 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:24:30,324 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731961453983.3081dea91b057915b834d5b83e904f00., hostname=5a964fc427ed,37687,1731961452840, seqNum=2 from cache 2024-11-18T20:24:30,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:30,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:31,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:31,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:32,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:32,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:33,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:33,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:33,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:34,122 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:24:34,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:24:34,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:34,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:35,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:35,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:36,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:36,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:37,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:37,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:38,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:38,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:39,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:39,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-18T20:24:40,436 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., hostname=5a964fc427ed,37687,1731961452840, seqNum=131] 2024-11-18T20:24:40,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:40,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:24:40,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1e5e729f6fbf442cb24f13f3e86a20e3 is 1080, key is row0097/info:/1731961480438/Put/seqid=0 2024-11-18T20:24:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741858_1034 (size=12516) 2024-11-18T20:24:40,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741858_1034 (size=12516) 2024-11-18T20:24:40,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1e5e729f6fbf442cb24f13f3e86a20e3 2024-11-18T20:24:40,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1e5e729f6fbf442cb24f13f3e86a20e3 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1e5e729f6fbf442cb24f13f3e86a20e3 2024-11-18T20:24:40,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1e5e729f6fbf442cb24f13f3e86a20e3, entries=7, sequenceid=141, filesize=12.2 K 2024-11-18T20:24:40,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 49ba49f4352eb74e2d81daec4da3f4b0 in 27ms, sequenceid=141, compaction requested=false 2024-11-18T20:24:40,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:40,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:24:40,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f39b5dfc88e74e468f49037d717eda88 is 1080, key is row0104/info:/1731961480449/Put/seqid=0 2024-11-18T20:24:40,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741859_1035 (size=17906) 2024-11-18T20:24:40,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741859_1035 (size=17906) 2024-11-18T20:24:40,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f39b5dfc88e74e468f49037d717eda88 2024-11-18T20:24:40,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f39b5dfc88e74e468f49037d717eda88 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f39b5dfc88e74e468f49037d717eda88 2024-11-18T20:24:40,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f39b5dfc88e74e468f49037d717eda88, entries=12, sequenceid=156, filesize=17.5 K 2024-11-18T20:24:40,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 49ba49f4352eb74e2d81daec4da3f4b0 in 23ms, sequenceid=156, compaction requested=true 2024-11-18T20:24:40,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:40,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:40,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:40,499 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:40,500 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:40,500 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:40,500 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in 
TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:40,500 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/05e7644b891149dea616f123726808b4, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1e5e729f6fbf442cb24f13f3e86a20e3, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f39b5dfc88e74e468f49037d717eda88] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=71.7 K 2024-11-18T20:24:40,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:40,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:24:40,501 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 05e7644b891149dea616f123726808b4, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731961466215 2024-11-18T20:24:40,501 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e5e729f6fbf442cb24f13f3e86a20e3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731961480438 2024-11-18T20:24:40,501 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting f39b5dfc88e74e468f49037d717eda88, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731961480449 2024-11-18T20:24:40,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/150260dceb8f40ce9d2822e53039d2bf is 1080, key is row0116/info:/1731961480477/Put/seqid=0 2024-11-18T20:24:40,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741860_1036 (size=17906) 2024-11-18T20:24:40,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741860_1036 (size=17906) 2024-11-18T20:24:40,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/150260dceb8f40ce9d2822e53039d2bf 2024-11-18T20:24:40,514 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#75 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:40,514 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/59cdfbce519f40b9beae21f8b8b34ea6 is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:40,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/150260dceb8f40ce9d2822e53039d2bf as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/150260dceb8f40ce9d2822e53039d2bf 2024-11-18T20:24:40,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741861_1037 (size=63636) 2024-11-18T20:24:40,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741861_1037 (size=63636) 2024-11-18T20:24:40,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/150260dceb8f40ce9d2822e53039d2bf, entries=12, sequenceid=171, filesize=17.5 K 2024-11-18T20:24:40,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 49ba49f4352eb74e2d81daec4da3f4b0 in 22ms, sequenceid=171, compaction requested=false 2024-11-18T20:24:40,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:40,524 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/59cdfbce519f40b9beae21f8b8b34ea6 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/59cdfbce519f40b9beae21f8b8b34ea6 2024-11-18T20:24:40,530 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into 59cdfbce519f40b9beae21f8b8b34ea6(size=62.1 K), total size for store is 79.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:24:40,530 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0:
2024-11-18T20:24:40,530 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961480499; duration=0sec
2024-11-18T20:24:40,530 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-18T20:24:40,530 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info
2024-11-18T20:24:40,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:40,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:41,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:24:41,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-18T20:24:42,241 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:24:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:42,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:24:42,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/2832664f8ab44fe3b7d591555adf1fb2 is 1080, key is row0128/info:/1731961480502/Put/seqid=0 2024-11-18T20:24:42,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741862_1038 (size=12516) 2024-11-18T20:24:42,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741862_1038 (size=12516) 2024-11-18T20:24:42,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/2832664f8ab44fe3b7d591555adf1fb2 2024-11-18T20:24:42,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/2832664f8ab44fe3b7d591555adf1fb2 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/2832664f8ab44fe3b7d591555adf1fb2 2024-11-18T20:24:42,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/2832664f8ab44fe3b7d591555adf1fb2, entries=7, sequenceid=182, filesize=12.2 K 2024-11-18T20:24:42,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 49ba49f4352eb74e2d81daec4da3f4b0 in 22ms, sequenceid=182, compaction requested=true 2024-11-18T20:24:42,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:42,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:42,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:42,540 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 
16 blocking 2024-11-18T20:24:42,541 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:42,542 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:42,542 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:42,542 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/59cdfbce519f40b9beae21f8b8b34ea6, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/150260dceb8f40ce9d2822e53039d2bf, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/2832664f8ab44fe3b7d591555adf1fb2] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=91.9 K 2024-11-18T20:24:42,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:42,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:24:42,542 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 59cdfbce519f40b9beae21f8b8b34ea6, keycount=54, bloomtype=ROW, size=62.1 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731961466215 2024-11-18T20:24:42,542 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 150260dceb8f40ce9d2822e53039d2bf, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731961480477 2024-11-18T20:24:42,543 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2832664f8ab44fe3b7d591555adf1fb2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1731961480502 2024-11-18T20:24:42,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/7758b991718e40d28f47e0376f6b2cd3 is 1080, key is row0135/info:/1731961482519/Put/seqid=0 2024-11-18T20:24:42,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741863_1039 (size=17906) 2024-11-18T20:24:42,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741863_1039 (size=17906) 2024-11-18T20:24:42,551 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/7758b991718e40d28f47e0376f6b2cd3 2024-11-18T20:24:42,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/7758b991718e40d28f47e0376f6b2cd3 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/7758b991718e40d28f47e0376f6b2cd3 2024-11-18T20:24:42,560 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#78 average throughput is 37.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:42,561 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/8c6083a0bc414bb49539587ee28f64ef is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:42,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/7758b991718e40d28f47e0376f6b2cd3, entries=12, sequenceid=197, filesize=17.5 K 2024-11-18T20:24:42,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=9.46 KB/9684 for 49ba49f4352eb74e2d81daec4da3f4b0 in 22ms, sequenceid=197, compaction requested=false 2024-11-18T20:24:42,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:42,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:42,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T20:24:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741864_1040 (size=84293) 2024-11-18T20:24:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741864_1040 (size=84293) 2024-11-18T20:24:42,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f4dbaf178cd74ac0b9dc967dd21d1e3a is 1080, key is row0147/info:/1731961482543/Put/seqid=0 2024-11-18T20:24:42,575 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/8c6083a0bc414bb49539587ee28f64ef as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/8c6083a0bc414bb49539587ee28f64ef 2024-11-18T20:24:42,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741865_1041 (size=16828) 2024-11-18T20:24:42,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741865_1041 (size=16828) 2024-11-18T20:24:42,581 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into 8c6083a0bc414bb49539587ee28f64ef(size=82.3 K), total size for store is 99.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:24:42,581 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:42,581 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961482540; duration=0sec 2024-11-18T20:24:42,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f4dbaf178cd74ac0b9dc967dd21d1e3a 2024-11-18T20:24:42,581 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:42,581 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info 2024-11-18T20:24:42,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f4dbaf178cd74ac0b9dc967dd21d1e3a as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f4dbaf178cd74ac0b9dc967dd21d1e3a 2024-11-18T20:24:42,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f4dbaf178cd74ac0b9dc967dd21d1e3a, entries=11, sequenceid=211, filesize=16.4 K 2024-11-18T20:24:42,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=3.15 KB/3228 for 49ba49f4352eb74e2d81daec4da3f4b0 in 26ms, sequenceid=211, compaction requested=true 2024-11-18T20:24:42,592 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:42,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:42,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:42,592 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:42,593 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:42,593 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:42,593 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:42,593 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/8c6083a0bc414bb49539587ee28f64ef, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/7758b991718e40d28f47e0376f6b2cd3, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f4dbaf178cd74ac0b9dc967dd21d1e3a] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=116.2 K 2024-11-18T20:24:42,594 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8c6083a0bc414bb49539587ee28f64ef, keycount=73, bloomtype=ROW, size=82.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1731961466215 2024-11-18T20:24:42,594 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7758b991718e40d28f47e0376f6b2cd3, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731961482519 2024-11-18T20:24:42,594 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4dbaf178cd74ac0b9dc967dd21d1e3a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731961482543 2024-11-18T20:24:42,605 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#80 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:42,606 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1adcf6a7a793485ea54eb1672df0fbb9 is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:42,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741866_1042 (size=109197) 2024-11-18T20:24:42,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741866_1042 (size=109197) 2024-11-18T20:24:42,616 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1adcf6a7a793485ea54eb1672df0fbb9 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1adcf6a7a793485ea54eb1672df0fbb9 2024-11-18T20:24:42,622 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into 1adcf6a7a793485ea54eb1672df0fbb9(size=106.6 K), total size for store is 106.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:24:42,623 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:42,623 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961482592; duration=0sec 2024-11-18T20:24:42,623 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:42,623 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info 2024-11-18T20:24:42,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:42,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:43,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:43,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:44,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:44,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:24:44,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/b6d29f002ad24b48ad05e67a1da9f028 is 1080, key is row0158/info:/1731961482567/Put/seqid=0 2024-11-18T20:24:44,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741867_1043 (size=12516) 2024-11-18T20:24:44,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741867_1043 (size=12516) 2024-11-18T20:24:44,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/b6d29f002ad24b48ad05e67a1da9f028 2024-11-18T20:24:44,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/b6d29f002ad24b48ad05e67a1da9f028 as 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/b6d29f002ad24b48ad05e67a1da9f028 2024-11-18T20:24:44,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/b6d29f002ad24b48ad05e67a1da9f028, entries=7, sequenceid=223, filesize=12.2 K 2024-11-18T20:24:44,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 49ba49f4352eb74e2d81daec4da3f4b0 in 30ms, sequenceid=223, compaction requested=false 2024-11-18T20:24:44,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:44,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:44,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T20:24:44,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1d1b091df58e45a7a46f6b2de3fdfb0a is 1080, key is row0165/info:/1731961484586/Put/seqid=0 2024-11-18T20:24:44,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741868_1044 (size=16828) 2024-11-18T20:24:44,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741868_1044 (size=16828) 2024-11-18T20:24:44,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1d1b091df58e45a7a46f6b2de3fdfb0a 2024-11-18T20:24:44,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1d1b091df58e45a7a46f6b2de3fdfb0a as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1d1b091df58e45a7a46f6b2de3fdfb0a 2024-11-18T20:24:44,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1d1b091df58e45a7a46f6b2de3fdfb0a, entries=11, sequenceid=237, filesize=16.4 K 2024-11-18T20:24:44,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 49ba49f4352eb74e2d81daec4da3f4b0 in 54ms, sequenceid=237, compaction requested=true 2024-11-18T20:24:44,669 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:44,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:44,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:44,669 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:44,671 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138541 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:44,671 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:44,671 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:44,672 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1adcf6a7a793485ea54eb1672df0fbb9, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/b6d29f002ad24b48ad05e67a1da9f028, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1d1b091df58e45a7a46f6b2de3fdfb0a] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=135.3 K 2024-11-18T20:24:44,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:44,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-18T20:24:44,672 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1adcf6a7a793485ea54eb1672df0fbb9, keycount=96, bloomtype=ROW, size=106.6 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731961466215 2024-11-18T20:24:44,673 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting b6d29f002ad24b48ad05e67a1da9f028, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1731961482567 2024-11-18T20:24:44,673 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1d1b091df58e45a7a46f6b2de3fdfb0a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731961484586 2024-11-18T20:24:44,678 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/719fb7bf891046ed862e5202fdb8b6be is 1080, key is row0176/info:/1731961484616/Put/seqid=0 2024-11-18T20:24:44,690 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#84 average throughput is 38.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:44,691 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/994fafc195224d4d84beae31425c7691 is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:44,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741869_1045 (size=20078) 2024-11-18T20:24:44,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741869_1045 (size=20078) 2024-11-18T20:24:44,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/719fb7bf891046ed862e5202fdb8b6be 2024-11-18T20:24:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741870_1046 (size=128835) 2024-11-18T20:24:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741870_1046 (size=128835) 2024-11-18T20:24:44,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/719fb7bf891046ed862e5202fdb8b6be as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/719fb7bf891046ed862e5202fdb8b6be 2024-11-18T20:24:44,716 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/994fafc195224d4d84beae31425c7691 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/994fafc195224d4d84beae31425c7691 2024-11-18T20:24:44,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/719fb7bf891046ed862e5202fdb8b6be, entries=14, sequenceid=254, filesize=19.6 K 2024-11-18T20:24:44,718 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for 49ba49f4352eb74e2d81daec4da3f4b0 in 46ms, sequenceid=254, compaction requested=false 2024-11-18T20:24:44,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:44,724 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into 994fafc195224d4d84beae31425c7691(size=125.8 K), total size for store is 145.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:24:44,724 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:44,724 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961484669; duration=0sec 2024-11-18T20:24:44,724 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:44,724 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info 2024-11-18T20:24:44,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:44,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:45,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:45,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:46,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:24:46,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/ffa287d530214179b6a20b4dea02bfdd is 1080, key is row0190/info:/1731961484674/Put/seqid=0 2024-11-18T20:24:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741871_1047 (size=12523) 2024-11-18T20:24:46,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741871_1047 (size=12523) 2024-11-18T20:24:46,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/ffa287d530214179b6a20b4dea02bfdd 2024-11-18T20:24:46,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/ffa287d530214179b6a20b4dea02bfdd as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/ffa287d530214179b6a20b4dea02bfdd 2024-11-18T20:24:46,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/ffa287d530214179b6a20b4dea02bfdd, entries=7, sequenceid=265, filesize=12.2 K 2024-11-18T20:24:46,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 49ba49f4352eb74e2d81daec4da3f4b0 in 36ms, sequenceid=265, compaction requested=true 2024-11-18T20:24:46,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:46,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:46,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:46,727 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:46,728 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161436 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:46,729 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:46,729 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:46,729 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/994fafc195224d4d84beae31425c7691, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/719fb7bf891046ed862e5202fdb8b6be, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/ffa287d530214179b6a20b4dea02bfdd] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=157.7 K 2024-11-18T20:24:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:46,729 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 994fafc195224d4d84beae31425c7691, keycount=114, bloomtype=ROW, size=125.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731961466215 2024-11-18T20:24:46,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-18T20:24:46,730 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 719fb7bf891046ed862e5202fdb8b6be, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731961484616 2024-11-18T20:24:46,731 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting ffa287d530214179b6a20b4dea02bfdd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1731961484674 2024-11-18T20:24:46,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f480389e8bf7447bb4937b82f49aad4e is 1080, key is row0197/info:/1731961486692/Put/seqid=0 2024-11-18T20:24:46,751 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#87 average throughput is 46.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:46,752 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/6a60bda1c3a242ec974562e93a5abe55 is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741872_1048 (size=21171) 2024-11-18T20:24:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741872_1048 (size=21171) 2024-11-18T20:24:46,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f480389e8bf7447bb4937b82f49aad4e 2024-11-18T20:24:46,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/f480389e8bf7447bb4937b82f49aad4e as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f480389e8bf7447bb4937b82f49aad4e 2024-11-18T20:24:46,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f480389e8bf7447bb4937b82f49aad4e, entries=15, sequenceid=283, filesize=20.7 K 2024-11-18T20:24:46,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for 49ba49f4352eb74e2d81daec4da3f4b0 in 62ms, sequenceid=283, compaction requested=false 2024-11-18T20:24:46,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741873_1049 (size=151655) 2024-11-18T20:24:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741873_1049 (size=151655) 2024-11-18T20:24:46,801 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/6a60bda1c3a242ec974562e93a5abe55 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/6a60bda1c3a242ec974562e93a5abe55 2024-11-18T20:24:46,810 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): 
Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into 6a60bda1c3a242ec974562e93a5abe55(size=148.1 K), total size for store is 168.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:24:46,810 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:46,810 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961486727; duration=0sec 2024-11-18T20:24:46,810 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:46,810 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info 2024-11-18T20:24:46,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:46,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:47,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:47,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:48,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:48,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-18T20:24:48,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/940b2852bce84216938a06e418612328 is 1080, key is row0212/info:/1731961486733/Put/seqid=0 2024-11-18T20:24:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741874_1050 (size=20092) 2024-11-18T20:24:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741874_1050 (size=20092) 2024-11-18T20:24:48,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/940b2852bce84216938a06e418612328 2024-11-18T20:24:48,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/940b2852bce84216938a06e418612328 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/940b2852bce84216938a06e418612328 2024-11-18T20:24:48,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/940b2852bce84216938a06e418612328, entries=14, sequenceid=301, filesize=19.6 K 2024-11-18T20:24:48,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 49ba49f4352eb74e2d81daec4da3f4b0 in 29ms, sequenceid=301, compaction requested=true 2024-11-18T20:24:48,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:48,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:48,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:48,798 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:48,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:48,798 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:24:48,800 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192918 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:48,800 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:48,800 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:48,800 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/6a60bda1c3a242ec974562e93a5abe55, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f480389e8bf7447bb4937b82f49aad4e, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/940b2852bce84216938a06e418612328] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=188.4 K 2024-11-18T20:24:48,800 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a60bda1c3a242ec974562e93a5abe55, keycount=135, bloomtype=ROW, size=148.1 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1731961466215 2024-11-18T20:24:48,801 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting f480389e8bf7447bb4937b82f49aad4e, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1731961486692 2024-11-18T20:24:48,801 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 940b2852bce84216938a06e418612328, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1731961486733 2024-11-18T20:24:48,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/4f7c75b72cc347afa6d31411679401cd is 1080, key is row0226/info:/1731961488769/Put/seqid=0 2024-11-18T20:24:48,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741875_1051 (size=17918) 2024-11-18T20:24:48,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741875_1051 (size=17918) 2024-11-18T20:24:48,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=316 (bloomFilter=true), 
to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/4f7c75b72cc347afa6d31411679401cd 2024-11-18T20:24:48,823 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#90 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:48,824 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/d572a492ae5444b1b8d19b41ab154c50 is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:48,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/4f7c75b72cc347afa6d31411679401cd as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/4f7c75b72cc347afa6d31411679401cd 2024-11-18T20:24:48,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741876_1052 (size=183056) 2024-11-18T20:24:48,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741876_1052 (size=183056) 2024-11-18T20:24:48,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/4f7c75b72cc347afa6d31411679401cd, entries=12, sequenceid=316, filesize=17.5 K 2024-11-18T20:24:48,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for 49ba49f4352eb74e2d81daec4da3f4b0 in 42ms, sequenceid=316, compaction requested=false 2024-11-18T20:24:48,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37687 {}] regionserver.HRegion(8855): Flush requested on 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:48,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T20:24:48,848 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/d572a492ae5444b1b8d19b41ab154c50 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/d572a492ae5444b1b8d19b41ab154c50 2024-11-18T20:24:48,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1c46352a8234477a9b7f9e7f8d358e7b is 1080, key is row0238/info:/1731961488800/Put/seqid=0 2024-11-18T20:24:48,856 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into d572a492ae5444b1b8d19b41ab154c50(size=178.8 K), total size for store is 196.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:24:48,856 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:48,857 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961488797; duration=0sec 2024-11-18T20:24:48,857 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:48,857 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info 2024-11-18T20:24:48,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741877_1053 (size=19013) 2024-11-18T20:24:48,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741877_1053 (size=19013) 2024-11-18T20:24:48,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1c46352a8234477a9b7f9e7f8d358e7b 2024-11-18T20:24:48,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:48,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/1c46352a8234477a9b7f9e7f8d358e7b as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c46352a8234477a9b7f9e7f8d358e7b 2024-11-18T20:24:48,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c46352a8234477a9b7f9e7f8d358e7b, entries=13, sequenceid=332, filesize=18.6 K 2024-11-18T20:24:48,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=6.30 KB/6456 for 49ba49f4352eb74e2d81daec4da3f4b0 in 47ms, sequenceid=332, compaction requested=true 2024-11-18T20:24:48,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:48,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ba49f4352eb74e2d81daec4da3f4b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:24:48,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:48,888 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:24:48,890 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 219987 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:24:48,890 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1541): 49ba49f4352eb74e2d81daec4da3f4b0/info is initiating minor compaction (all files) 2024-11-18T20:24:48,890 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting 
compaction of 49ba49f4352eb74e2d81daec4da3f4b0/info in TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:48,890 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/d572a492ae5444b1b8d19b41ab154c50, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/4f7c75b72cc347afa6d31411679401cd, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c46352a8234477a9b7f9e7f8d358e7b] into tmpdir=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp, totalSize=214.8 K 2024-11-18T20:24:48,891 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting d572a492ae5444b1b8d19b41ab154c50, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1731961466215 2024-11-18T20:24:48,891 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4f7c75b72cc347afa6d31411679401cd, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731961488769 2024-11-18T20:24:48,892 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c46352a8234477a9b7f9e7f8d358e7b, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1731961488800 2024-11-18T20:24:48,910 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ba49f4352eb74e2d81daec4da3f4b0#info#compaction#92 average throughput is 32.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:24:48,910 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/94b6c5d9f244439488f4ea84f1cdeed1 is 1080, key is row0062/info:/1731961466215/Put/seqid=0 2024-11-18T20:24:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741878_1054 (size=210210) 2024-11-18T20:24:48,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741878_1054 (size=210210) 2024-11-18T20:24:48,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:48,924 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/94b6c5d9f244439488f4ea84f1cdeed1 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/94b6c5d9f244439488f4ea84f1cdeed1 2024-11-18T20:24:48,939 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ba49f4352eb74e2d81daec4da3f4b0/info of 49ba49f4352eb74e2d81daec4da3f4b0 into 94b6c5d9f244439488f4ea84f1cdeed1(size=205.3 K), total size for store is 205.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
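The WARN entries repeated throughout this stretch ("Failed invocation for hdfs://... Caused by: java.io.IOException: Filesystem closed") come from the Close-WAL-Writer-0 thread probing whether an old WAL file has been closed. The probe is made through reflection, so the real IOException only surfaces as the wrapped cause of an InvocationTargetException, exactly as the trace shows. Below is a minimal sketch of that reflective probe pattern; the class name, method body, and error handling are illustrative, not the RecoverLeaseFSUtils source.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of a reflective isFileClosed probe; only the method name and the
    // InvocationTargetException wrapping are taken from the stack trace above.
    final class IsFileClosedProbe {
      static boolean probe(FileSystem fs, Path wal) {
        try {
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, wal);
        } catch (InvocationTargetException e) {
          // The underlying "Filesystem closed" IOException arrives as the wrapped
          // cause, which is why the log prints it under "Caused by".
          return false;
        } catch (ReflectiveOperationException e) {
          // Filesystem implementation without an isFileClosed(Path) method.
          return false;
        }
      }
    }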
2024-11-18T20:24:48,939 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:48,939 INFO [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., storeName=49ba49f4352eb74e2d81daec4da3f4b0/info, priority=13, startTime=1731961488888; duration=0sec 2024-11-18T20:24:48,939 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:24:48,939 DEBUG [RS:0;5a964fc427ed:37687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ba49f4352eb74e2d81daec4da3f4b0:info 2024-11-18T20:24:49,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:49,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:50,867 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-18T20:24:50,867 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37687%2C1731961452840.1731961490867 2024-11-18T20:24:50,873 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,873 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,873 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,873 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,873 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,873 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961453355 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961490867 2024-11-18T20:24:50,874 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43461:43461),(127.0.0.1/127.0.0.1:38555:38555)] 2024-11-18T20:24:50,874 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961453355 is not closed yet, will try archiving it next time 2024-11-18T20:24:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741833_1009 (size=317837) 2024-11-18T20:24:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741833_1009 (size=317837) 2024-11-18T20:24:50,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:50,898 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 49ba49f4352eb74e2d81daec4da3f4b0 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-18T20:24:50,903 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/0e42bf7004824735b5f0235f8c262229 is 1080, key is row0251/info:/1731961488843/Put/seqid=0 2024-11-18T20:24:50,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741880_1056 (size=11436) 2024-11-18T20:24:50,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741880_1056 (size=11436) 2024-11-18T20:24:50,909 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/0e42bf7004824735b5f0235f8c262229 2024-11-18T20:24:50,915 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/.tmp/info/0e42bf7004824735b5f0235f8c262229 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/0e42bf7004824735b5f0235f8c262229 2024-11-18T20:24:50,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:50,920 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/0e42bf7004824735b5f0235f8c262229, entries=6, sequenceid=343, filesize=11.2 K 2024-11-18T20:24:50,921 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 49ba49f4352eb74e2d81daec4da3f4b0 in 23ms, sequenceid=343, compaction requested=false 2024-11-18T20:24:50,921 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 49ba49f4352eb74e2d81daec4da3f4b0: 2024-11-18T20:24:50,921 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b1ea4d159b7999e1e5347a6755cf09c6: 2024-11-18T20:24:50,921 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-18T20:24:50,926 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/info/b9ff21b2be6a4000af44a3933e2ac9f9 is 186, key is TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6./info:regioninfo/1731961469169/Put/seqid=0 2024-11-18T20:24:50,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741881_1057 (size=6153) 2024-11-18T20:24:50,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741881_1057 (size=6153) 2024-11-18T20:24:50,931 INFO [Time-limited test {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/info/b9ff21b2be6a4000af44a3933e2ac9f9 2024-11-18T20:24:50,938 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/.tmp/info/b9ff21b2be6a4000af44a3933e2ac9f9 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/info/b9ff21b2be6a4000af44a3933e2ac9f9 2024-11-18T20:24:50,944 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/info/b9ff21b2be6a4000af44a3933e2ac9f9, entries=5, sequenceid=21, filesize=6.0 K 2024-11-18T20:24:50,945 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 24ms, sequenceid=21, compaction requested=false 2024-11-18T20:24:50,945 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T20:24:50,946 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C37687%2C1731961452840.1731961490945 2024-11-18T20:24:50,976 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,976 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,976 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,976 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,976 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:50,976 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961490867 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961490945 2024-11-18T20:24:50,977 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43461:43461),(127.0.0.1/127.0.0.1:38555:38555)] 2024-11-18T20:24:50,977 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961490867 is not closed yet, will try archiving it next time 2024-11-18T20:24:50,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741879_1055 (size=731) 2024-11-18T20:24:50,978 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961453355 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/oldWALs/5a964fc427ed%2C37687%2C1731961452840.1731961453355 2024-11-18T20:24:50,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741879_1055 (size=731) 
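The flush entries above follow the same two-step pattern each time: the new HFile is first written under the region's .tmp directory, then "committed" by a rename into the store's info directory. A hedged illustration of that commit step using the plain Hadoop FileSystem API follows; the paths are placeholders and this is not the exact HBase helper.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustration of "Committing <.tmp/info/file> as <info/file>": write into
    // .tmp, then rename into the store directory. Not HBase's HRegionFileSystem
    // code, just the underlying filesystem operation it records.
    public final class CommitTmpHFile {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmpFile = new Path(args[0]);   // e.g. <region>/.tmp/info/<hfile>
        Path storeFile = new Path(args[1]); // e.g. <region>/info/<hfile>
        if (!fs.rename(tmpFile, storeFile)) {
          throw new java.io.IOException("Commit failed: " + tmpFile + " -> " + storeFile);
        }
      }
    }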
2024-11-18T20:24:50,979 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-11-18T20:24:50,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-18T20:24:50,980 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-18T20:24:50,980 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/WALs/5a964fc427ed,37687,1731961452840/5a964fc427ed%2C37687%2C1731961452840.1731961490867 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/oldWALs/5a964fc427ed%2C37687%2C1731961452840.1731961490867
2024-11-18T20:24:50,980 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
  at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
  at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
  at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
  at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
  at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-18T20:24:50,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-18T20:24:50,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-18T20:24:50,980 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-18T20:24:50,980 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-18T20:24:50,980 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=394603458, stopped=false
2024-11-18T20:24:50,980 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5a964fc427ed,44881,1731961452670
2024-11-18T20:24:51,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-18T20:24:51,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-18T20:24:51,020 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-18T20:24:51,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-18T20:24:51,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-18T20:24:51,020 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
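The call stack above shows where the shutdown originates: the JUnit tear-down of AbstractTestLogRolling calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection and then asks the master for a cluster shutdown. A sketch of that tear-down hook follows; the class and field names are illustrative, and only shutdownMiniCluster() is taken from the trace.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    // Sketch of the tear-down path the call stack above runs through
    // (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster).
    public class LogRollingTearDownSketch {
      // Assumed to be created and started in the test's setup (not shown).
      protected static HBaseTestingUtil TEST_UTIL;

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection and stops master and region
        // servers, producing the "Shutting down minicluster" sequence above.
        TEST_UTIL.shutdownMiniCluster();
      }
    }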
2024-11-18T20:24:51,021 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:24:51,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:51,021 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,37687,1731961452840' ***** 2024-11-18T20:24:51,021 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:24:51,022 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(3091): Received CLOSE for 49ba49f4352eb74e2d81daec4da3f4b0 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(3091): Received CLOSE for b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,37687,1731961452840 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:24:51,022 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 49ba49f4352eb74e2d81daec4da3f4b0, disabling compactions & flushes 2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5a964fc427ed:37687. 2024-11-18T20:24:51,022 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:51,022 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 
2024-11-18T20:24:51,022 DEBUG [RS:0;5a964fc427ed:37687 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
  at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
  at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-18T20:24:51,022 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. after waiting 0 ms
2024-11-18T20:24:51,022 DEBUG [RS:0;5a964fc427ed:37687 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-18T20:24:51,022 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.
2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-18T20:24:51,022 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-18T20:24:51,023 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:24:51,024 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:51,024 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:51,031 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-18T20:24:51,031 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1325): Online Regions={49ba49f4352eb74e2d81daec4da3f4b0=TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0., b1ea4d159b7999e1e5347a6755cf09c6=TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:24:51,031 DEBUG [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 49ba49f4352eb74e2d81daec4da3f4b0, b1ea4d159b7999e1e5347a6755cf09c6 2024-11-18T20:24:51,031 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00->hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b-top, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/05e7644b891149dea616f123726808b4, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1e5e729f6fbf442cb24f13f3e86a20e3, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/59cdfbce519f40b9beae21f8b8b34ea6, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f39b5dfc88e74e468f49037d717eda88, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/150260dceb8f40ce9d2822e53039d2bf, 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/8c6083a0bc414bb49539587ee28f64ef, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/2832664f8ab44fe3b7d591555adf1fb2, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/7758b991718e40d28f47e0376f6b2cd3, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1adcf6a7a793485ea54eb1672df0fbb9, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f4dbaf178cd74ac0b9dc967dd21d1e3a, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/b6d29f002ad24b48ad05e67a1da9f028, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/994fafc195224d4d84beae31425c7691, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1d1b091df58e45a7a46f6b2de3fdfb0a, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/719fb7bf891046ed862e5202fdb8b6be, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/6a60bda1c3a242ec974562e93a5abe55, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/ffa287d530214179b6a20b4dea02bfdd, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f480389e8bf7447bb4937b82f49aad4e, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/d572a492ae5444b1b8d19b41ab154c50, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/940b2852bce84216938a06e418612328, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/4f7c75b72cc347afa6d31411679401cd, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c46352a8234477a9b7f9e7f8d358e7b] to archive 2024-11-18T20:24:51,032 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:24:51,032 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 
2024-11-18T20:24:51,032 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:24:51,032 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:24:51,032 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:24:51,033 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T20:24:51,035 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:51,037 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-562355dbfce544b192d5d64c60e786a7 2024-11-18T20:24:51,039 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/05e7644b891149dea616f123726808b4 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/05e7644b891149dea616f123726808b4 2024-11-18T20:24:51,041 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/TestLogRolling-testLogRolling=3081dea91b057915b834d5b83e904f00-de5ce08ef7c642cda0860d666a977562 2024-11-18T20:24:51,045 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1e5e729f6fbf442cb24f13f3e86a20e3 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1e5e729f6fbf442cb24f13f3e86a20e3 2024-11-18T20:24:51,047 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/59cdfbce519f40b9beae21f8b8b34ea6 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/59cdfbce519f40b9beae21f8b8b34ea6 2024-11-18T20:24:51,049 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f39b5dfc88e74e468f49037d717eda88 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f39b5dfc88e74e468f49037d717eda88 2024-11-18T20:24:51,051 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/150260dceb8f40ce9d2822e53039d2bf to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/150260dceb8f40ce9d2822e53039d2bf 2024-11-18T20:24:51,053 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/8c6083a0bc414bb49539587ee28f64ef to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/8c6083a0bc414bb49539587ee28f64ef 2024-11-18T20:24:51,055 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/2832664f8ab44fe3b7d591555adf1fb2 to 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/2832664f8ab44fe3b7d591555adf1fb2 2024-11-18T20:24:51,057 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/7758b991718e40d28f47e0376f6b2cd3 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/7758b991718e40d28f47e0376f6b2cd3 2024-11-18T20:24:51,058 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-18T20:24:51,059 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:24:51,059 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:24:51,059 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961491031Running coprocessor pre-close hooks at 1731961491031Disabling compacts and flushes for region at 1731961491031Disabling writes for close at 1731961491032 (+1 ms)Writing region close event to WAL at 1731961491052 (+20 ms)Running coprocessor post-close hooks at 1731961491058 (+6 ms)Closed at 1731961491059 (+1 ms) 2024-11-18T20:24:51,059 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:24:51,060 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1adcf6a7a793485ea54eb1672df0fbb9 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1adcf6a7a793485ea54eb1672df0fbb9 2024-11-18T20:24:51,062 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f4dbaf178cd74ac0b9dc967dd21d1e3a to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f4dbaf178cd74ac0b9dc967dd21d1e3a 2024-11-18T20:24:51,064 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived 
from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/b6d29f002ad24b48ad05e67a1da9f028 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/b6d29f002ad24b48ad05e67a1da9f028 2024-11-18T20:24:51,066 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/994fafc195224d4d84beae31425c7691 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/994fafc195224d4d84beae31425c7691 2024-11-18T20:24:51,068 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1d1b091df58e45a7a46f6b2de3fdfb0a to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1d1b091df58e45a7a46f6b2de3fdfb0a 2024-11-18T20:24:51,069 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/719fb7bf891046ed862e5202fdb8b6be to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/719fb7bf891046ed862e5202fdb8b6be 2024-11-18T20:24:51,071 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/6a60bda1c3a242ec974562e93a5abe55 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/6a60bda1c3a242ec974562e93a5abe55 2024-11-18T20:24:51,073 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/ffa287d530214179b6a20b4dea02bfdd to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/ffa287d530214179b6a20b4dea02bfdd 2024-11-18T20:24:51,075 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f480389e8bf7447bb4937b82f49aad4e to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/f480389e8bf7447bb4937b82f49aad4e 2024-11-18T20:24:51,076 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/d572a492ae5444b1b8d19b41ab154c50 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/d572a492ae5444b1b8d19b41ab154c50 2024-11-18T20:24:51,078 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/940b2852bce84216938a06e418612328 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/940b2852bce84216938a06e418612328 2024-11-18T20:24:51,079 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/4f7c75b72cc347afa6d31411679401cd to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/4f7c75b72cc347afa6d31411679401cd 2024-11-18T20:24:51,081 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c46352a8234477a9b7f9e7f8d358e7b to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/info/1c46352a8234477a9b7f9e7f8d358e7b 2024-11-18T20:24:51,081 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5a964fc427ed:44881 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T20:24:51,082 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [05e7644b891149dea616f123726808b4=42984, 1e5e729f6fbf442cb24f13f3e86a20e3=12516, 59cdfbce519f40b9beae21f8b8b34ea6=63636, f39b5dfc88e74e468f49037d717eda88=17906, 150260dceb8f40ce9d2822e53039d2bf=17906, 8c6083a0bc414bb49539587ee28f64ef=84293, 2832664f8ab44fe3b7d591555adf1fb2=12516, 7758b991718e40d28f47e0376f6b2cd3=17906, 1adcf6a7a793485ea54eb1672df0fbb9=109197, f4dbaf178cd74ac0b9dc967dd21d1e3a=16828, b6d29f002ad24b48ad05e67a1da9f028=12516, 994fafc195224d4d84beae31425c7691=128835, 1d1b091df58e45a7a46f6b2de3fdfb0a=16828, 719fb7bf891046ed862e5202fdb8b6be=20078, 6a60bda1c3a242ec974562e93a5abe55=151655, ffa287d530214179b6a20b4dea02bfdd=12523, f480389e8bf7447bb4937b82f49aad4e=21171, d572a492ae5444b1b8d19b41ab154c50=183056, 940b2852bce84216938a06e418612328=20092, 4f7c75b72cc347afa6d31411679401cd=17918, 1c46352a8234477a9b7f9e7f8d358e7b=19013] 2024-11-18T20:24:51,087 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/49ba49f4352eb74e2d81daec4da3f4b0/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130 2024-11-18T20:24:51,088 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:51,088 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 49ba49f4352eb74e2d81daec4da3f4b0: Waiting for close lock at 1731961491022Running coprocessor pre-close hooks at 1731961491022Disabling compacts and flushes for region at 1731961491022Disabling writes for close at 1731961491022Writing region close event to WAL at 1731961491083 (+61 ms)Running coprocessor post-close hooks at 1731961491088 (+5 ms)Closed at 1731961491088 2024-11-18T20:24:51,089 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731961468321.49ba49f4352eb74e2d81daec4da3f4b0. 2024-11-18T20:24:51,089 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b1ea4d159b7999e1e5347a6755cf09c6, disabling compactions & flushes 2024-11-18T20:24:51,089 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:51,089 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:51,089 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 
after waiting 0 ms 2024-11-18T20:24:51,089 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:51,089 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00->hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/3081dea91b057915b834d5b83e904f00/info/1c90ebfc69b44c91b9d7685cc7a4548b-bottom] to archive 2024-11-18T20:24:51,091 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T20:24:51,093 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00 to hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/archive/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/info/1c90ebfc69b44c91b9d7685cc7a4548b.3081dea91b057915b834d5b83e904f00 2024-11-18T20:24:51,093 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-18T20:24:51,098 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/data/default/TestLogRolling-testLogRolling/b1ea4d159b7999e1e5347a6755cf09c6/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-18T20:24:51,099 INFO [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:51,099 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b1ea4d159b7999e1e5347a6755cf09c6: Waiting for close lock at 1731961491089Running coprocessor pre-close hooks at 1731961491089Disabling compacts and flushes for region at 1731961491089Disabling writes for close at 1731961491089Writing region close event to WAL at 1731961491094 (+5 ms)Running coprocessor post-close hooks at 1731961491099 (+5 ms)Closed at 1731961491099 2024-11-18T20:24:51,099 DEBUG [RS_CLOSE_REGION-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731961468321.b1ea4d159b7999e1e5347a6755cf09c6. 2024-11-18T20:24:51,231 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,37687,1731961452840; all regions closed. 
2024-11-18T20:24:51,232 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,232 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,232 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,232 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,232 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741834_1010 (size=8107) 2024-11-18T20:24:51,237 INFO [regionserver/5a964fc427ed:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:24:51,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741834_1010 (size=8107) 2024-11-18T20:24:51,243 DEBUG [RS:0;5a964fc427ed:37687 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/oldWALs 2024-11-18T20:24:51,243 INFO [RS:0;5a964fc427ed:37687 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C37687%2C1731961452840.meta:.meta(num 1731961453787) 2024-11-18T20:24:51,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,244 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,244 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,245 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741882_1058 (size=780) 2024-11-18T20:24:51,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741882_1058 (size=780) 2024-11-18T20:24:51,250 DEBUG [RS:0;5a964fc427ed:37687 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/oldWALs 2024-11-18T20:24:51,250 INFO [RS:0;5a964fc427ed:37687 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C37687%2C1731961452840:(num 1731961490945) 2024-11-18T20:24:51,250 DEBUG [RS:0;5a964fc427ed:37687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:51,250 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:24:51,250 INFO [RS:0;5a964fc427ed:37687 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:24:51,251 INFO [RS:0;5a964fc427ed:37687 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:24:51,251 INFO [RS:0;5a964fc427ed:37687 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:24:51,251 INFO [RS:0;5a964fc427ed:37687 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37687 2024-11-18T20:24:51,251 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:24:51,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,37687,1731961452840 2024-11-18T20:24:51,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:24:51,262 INFO [RS:0;5a964fc427ed:37687 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:24:51,272 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,37687,1731961452840] 2024-11-18T20:24:51,280 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,37687,1731961452840 already deleted, retry=false 2024-11-18T20:24:51,280 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,37687,1731961452840 expired; onlineServers=0 2024-11-18T20:24:51,280 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5a964fc427ed,44881,1731961452670' ***** 2024-11-18T20:24:51,281 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:24:51,281 INFO [M:0;5a964fc427ed:44881 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:24:51,281 INFO [M:0;5a964fc427ed:44881 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:24:51,281 DEBUG [M:0;5a964fc427ed:44881 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:24:51,281 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-18T20:24:51,281 DEBUG [M:0;5a964fc427ed:44881 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:24:51,281 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961453132 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961453132,5,FailOnTimeoutGroup] 2024-11-18T20:24:51,281 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961453134 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961453134,5,FailOnTimeoutGroup] 2024-11-18T20:24:51,281 INFO [M:0;5a964fc427ed:44881 {}] hbase.ChoreService(370): Chore service for: master/5a964fc427ed:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:24:51,281 INFO [M:0;5a964fc427ed:44881 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:24:51,281 DEBUG [M:0;5a964fc427ed:44881 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:24:51,281 INFO [M:0;5a964fc427ed:44881 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:24:51,282 INFO [M:0;5a964fc427ed:44881 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:24:51,282 INFO [M:0;5a964fc427ed:44881 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:24:51,282 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-18T20:24:51,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:24:51,289 DEBUG [M:0;5a964fc427ed:44881 {}] zookeeper.ZKUtil(347): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:24:51,289 WARN [M:0;5a964fc427ed:44881 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:24:51,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:51,289 INFO [M:0;5a964fc427ed:44881 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/.lastflushedseqids 2024-11-18T20:24:51,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741883_1059 (size=228) 2024-11-18T20:24:51,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741883_1059 (size=228) 2024-11-18T20:24:51,297 INFO [M:0;5a964fc427ed:44881 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:24:51,297 INFO [M:0;5a964fc427ed:44881 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:24:51,297 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:24:51,297 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:51,297 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:51,297 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:24:51,297 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:24:51,297 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.44 KB heapSize=63.39 KB 2024-11-18T20:24:51,321 DEBUG [M:0;5a964fc427ed:44881 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6165dabff16945a8b8cdaddd9ee504e6 is 82, key is hbase:meta,,1/info:regioninfo/1731961453811/Put/seqid=0 2024-11-18T20:24:51,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741884_1060 (size=5672) 2024-11-18T20:24:51,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741884_1060 (size=5672) 2024-11-18T20:24:51,333 INFO [M:0;5a964fc427ed:44881 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6165dabff16945a8b8cdaddd9ee504e6 2024-11-18T20:24:51,358 DEBUG [M:0;5a964fc427ed:44881 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5ae6bf911824b639acd1a79f378adaf is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961454367/Put/seqid=0 2024-11-18T20:24:51,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:51,373 INFO [RS:0;5a964fc427ed:37687 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:24:51,373 INFO [RS:0;5a964fc427ed:37687 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,37687,1731961452840; zookeeper connection closed. 
2024-11-18T20:24:51,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37687-0x10150cc3c7c0001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:51,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741885_1061 (size=7091) 2024-11-18T20:24:51,397 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4c1e558e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4c1e558e 2024-11-18T20:24:51,397 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:24:51,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741885_1061 (size=7091) 2024-11-18T20:24:51,399 INFO [M:0;5a964fc427ed:44881 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5ae6bf911824b639acd1a79f378adaf 2024-11-18T20:24:51,406 INFO [M:0;5a964fc427ed:44881 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e5ae6bf911824b639acd1a79f378adaf 2024-11-18T20:24:51,429 DEBUG [M:0;5a964fc427ed:44881 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/022fc7105edd4c1bbc4e57eba51f9f7b is 69, key is 5a964fc427ed,37687,1731961452840/rs:state/1731961453198/Put/seqid=0 2024-11-18T20:24:51,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741886_1062 (size=5156) 2024-11-18T20:24:51,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741886_1062 (size=5156) 2024-11-18T20:24:51,447 INFO [M:0;5a964fc427ed:44881 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/022fc7105edd4c1bbc4e57eba51f9f7b 2024-11-18T20:24:51,477 DEBUG [M:0;5a964fc427ed:44881 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e12b0bf6f78f4005b166761c07674f9b is 52, key is load_balancer_on/state:d/1731961453977/Put/seqid=0 2024-11-18T20:24:51,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741887_1063 (size=5056) 2024-11-18T20:24:51,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741887_1063 (size=5056) 2024-11-18T20:24:51,498 INFO [M:0;5a964fc427ed:44881 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e12b0bf6f78f4005b166761c07674f9b 2024-11-18T20:24:51,505 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6165dabff16945a8b8cdaddd9ee504e6 as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6165dabff16945a8b8cdaddd9ee504e6 2024-11-18T20:24:51,514 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6165dabff16945a8b8cdaddd9ee504e6, entries=8, sequenceid=125, filesize=5.5 K 2024-11-18T20:24:51,516 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5ae6bf911824b639acd1a79f378adaf as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e5ae6bf911824b639acd1a79f378adaf 2024-11-18T20:24:51,522 INFO [M:0;5a964fc427ed:44881 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e5ae6bf911824b639acd1a79f378adaf 2024-11-18T20:24:51,522 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e5ae6bf911824b639acd1a79f378adaf, entries=13, sequenceid=125, filesize=6.9 K 2024-11-18T20:24:51,523 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/022fc7105edd4c1bbc4e57eba51f9f7b as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/022fc7105edd4c1bbc4e57eba51f9f7b 2024-11-18T20:24:51,530 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/022fc7105edd4c1bbc4e57eba51f9f7b, entries=1, sequenceid=125, filesize=5.0 K 2024-11-18T20:24:51,531 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e12b0bf6f78f4005b166761c07674f9b as hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e12b0bf6f78f4005b166761c07674f9b 2024-11-18T20:24:51,537 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35761/user/jenkins/test-data/3f48b1b6-97d5-bfd4-df9d-aef32a10a2af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e12b0bf6f78f4005b166761c07674f9b, entries=1, sequenceid=125, filesize=4.9 K 2024-11-18T20:24:51,539 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 242ms, sequenceid=125, compaction requested=false 2024-11-18T20:24:51,544 INFO [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:51,544 DEBUG [M:0;5a964fc427ed:44881 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961491297Disabling compacts and flushes for region at 1731961491297Disabling writes for close at 1731961491297Obtaining lock to block concurrent updates at 1731961491297Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961491297Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52675, getHeapSize=64848, getOffHeapSize=0, getCellsCount=148 at 1731961491298 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961491299 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961491299Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961491321 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961491321Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961491339 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961491357 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961491357Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961491406 (+49 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961491428 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961491428Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961491455 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961491476 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961491477 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34daead4: reopening flushed file at 1731961491503 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ee5d43f: reopening flushed file at 1731961491514 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2421ec76: reopening flushed file at 1731961491522 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70ea9c3d: reopening flushed file at 1731961491530 (+8 ms)Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 242ms, sequenceid=125, compaction requested=false at 1731961491539 (+9 ms)Writing region close event to WAL at 1731961491544 (+5 ms)Closed at 1731961491544 2024-11-18T20:24:51,559 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,559 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-18T20:24:51,559 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:51,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37293 is added to blk_1073741830_1006 (size=61344) 2024-11-18T20:24:51,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741830_1006 (size=61344) 2024-11-18T20:24:51,563 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:24:51,563 INFO [M:0;5a964fc427ed:44881 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:24:51,563 INFO [M:0;5a964fc427ed:44881 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44881 2024-11-18T20:24:51,563 INFO [M:0;5a964fc427ed:44881 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:24:51,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:51,672 INFO [M:0;5a964fc427ed:44881 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:24:51,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44881-0x10150cc3c7c0000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:51,675 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@444decb3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:51,677 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@216b0c63{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:51,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:51,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ab86f9f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:51,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4ed6ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:51,680 WARN [BP-1888943501-172.17.0.2-1731961450756 heartbeating to localhost/127.0.0.1:35761 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:24:51,680 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:24:51,680 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:24:51,680 WARN [BP-1888943501-172.17.0.2-1731961450756 heartbeating to localhost/127.0.0.1:35761 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1888943501-172.17.0.2-1731961450756 (Datanode Uuid de8f8b72-4878-4eee-8a07-b8fdcc8f78ca) service to localhost/127.0.0.1:35761 2024-11-18T20:24:51,681 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data3/current/BP-1888943501-172.17.0.2-1731961450756 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:51,681 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data4/current/BP-1888943501-172.17.0.2-1731961450756 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:51,682 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:24:51,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f5f0b5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:51,685 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31e425dc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:51,685 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:51,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bb23947{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:51,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a5806e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:51,688 WARN [BP-1888943501-172.17.0.2-1731961450756 heartbeating to localhost/127.0.0.1:35761 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:24:51,688 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:24:51,688 WARN [BP-1888943501-172.17.0.2-1731961450756 heartbeating to localhost/127.0.0.1:35761 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1888943501-172.17.0.2-1731961450756 (Datanode Uuid 43c461ee-1478-4e58-8fab-970efe1b68b4) service to localhost/127.0.0.1:35761 2024-11-18T20:24:51,688 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:24:51,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data1/current/BP-1888943501-172.17.0.2-1731961450756 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:51,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/cluster_f2d38c7a-5ca8-3461-d525-fb2eb9cf3895/data/data2/current/BP-1888943501-172.17.0.2-1731961450756 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:51,689 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:24:51,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f63b03b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:24:51,699 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@622d58de{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:51,699 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:51,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e36d39c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:51,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bc081d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:51,707 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:24:51,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:24:51,765 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 208) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35761 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35761 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35761 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.6@localhost:35761 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35761 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35761 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35761 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35761 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=522 (was 486) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=218 (was 315), ProcessCount=11 (was 11), AvailableMemoryMB=3233 (was 3945) 2024-11-18T20:24:51,776 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=522, MaxFileDescriptor=1048576, SystemLoadAverage=218, ProcessCount=11, AvailableMemoryMB=3233 2024-11-18T20:24:51,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:24:51,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.log.dir so I do NOT create it in target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674 2024-11-18T20:24:51,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/13c8d5dc-4ff3-169e-74b3-67ec2fd9543a/hadoop.tmp.dir so I do NOT create it in target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674 2024-11-18T20:24:51,777 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5, deleteOnExit=true 2024-11-18T20:24:51,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:24:51,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/test.cache.data in system properties and HBase conf 2024-11-18T20:24:51,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:24:51,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:24:51,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:24:51,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:24:51,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:24:51,778 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:24:51,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:24:51,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:24:51,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:24:51,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:24:51,799 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:24:51,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:51,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:52,000 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:52,004 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:24:52,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:24:52,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:24:52,005 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:24:52,006 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:52,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@238bf9b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:24:52,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@814e400{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:24:52,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60fdf071{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/java.io.tmpdir/jetty-localhost-41123-hadoop-hdfs-3_4_1-tests_jar-_-any-6273910932932000302/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:24:52,115 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3ffef2a8{HTTP/1.1, (http/1.1)}{localhost:41123} 2024-11-18T20:24:52,115 INFO [Time-limited test {}] server.Server(415): Started @283607ms 2024-11-18T20:24:52,130 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:24:52,302 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:52,305 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:24:52,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:24:52,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:24:52,315 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:24:52,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5eefcd14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:24:52,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1773ea07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:24:52,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68813b82{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/java.io.tmpdir/jetty-localhost-41727-hadoop-hdfs-3_4_1-tests_jar-_-any-2099596477689775584/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:52,421 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@509dedeb{HTTP/1.1, (http/1.1)}{localhost:41727} 2024-11-18T20:24:52,421 INFO [Time-limited test {}] server.Server(415): Started @283912ms 2024-11-18T20:24:52,422 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:24:52,481 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:24:52,484 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:24:52,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:24:52,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:24:52,485 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:24:52,485 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2269c58e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:24:52,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6df20715{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:24:52,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5991282a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/java.io.tmpdir/jetty-localhost-37341-hadoop-hdfs-3_4_1-tests_jar-_-any-9323945021722234867/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:52,610 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73bc7eb2{HTTP/1.1, (http/1.1)}{localhost:37341} 2024-11-18T20:24:52,610 INFO [Time-limited test {}] server.Server(415): Started @284102ms 2024-11-18T20:24:52,612 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:24:52,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:52,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:53,057 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data2/current/BP-899648378-172.17.0.2-1731961491804/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:53,057 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data1/current/BP-899648378-172.17.0.2-1731961491804/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:53,088 WARN [Thread-2466 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:24:53,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf666c9610e25385 with lease ID 0xd876379f87b48b86: Processing first storage report for DS-b4205e0b-72c5-4ae1-a2f7-1361cc962aa4 from datanode DatanodeRegistration(127.0.0.1:39761, datanodeUuid=60ca41f1-fc84-4a0f-b120-bf1a0d68653e, infoPort=33451, infoSecurePort=0, ipcPort=38685, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804) 2024-11-18T20:24:53,091 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf666c9610e25385 with lease ID 0xd876379f87b48b86: from storage DS-b4205e0b-72c5-4ae1-a2f7-1361cc962aa4 node DatanodeRegistration(127.0.0.1:39761, datanodeUuid=60ca41f1-fc84-4a0f-b120-bf1a0d68653e, infoPort=33451, infoSecurePort=0, ipcPort=38685, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:53,091 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf666c9610e25385 with lease ID 0xd876379f87b48b86: Processing first storage report for DS-3cdcb064-be93-486e-8fa5-edc19527b993 from datanode DatanodeRegistration(127.0.0.1:39761, datanodeUuid=60ca41f1-fc84-4a0f-b120-bf1a0d68653e, infoPort=33451, infoSecurePort=0, ipcPort=38685, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804) 2024-11-18T20:24:53,091 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf666c9610e25385 with lease ID 0xd876379f87b48b86: from storage DS-3cdcb064-be93-486e-8fa5-edc19527b993 node DatanodeRegistration(127.0.0.1:39761, datanodeUuid=60ca41f1-fc84-4a0f-b120-bf1a0d68653e, infoPort=33451, infoSecurePort=0, ipcPort=38685, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:53,224 WARN [Thread-2514 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data4/current/BP-899648378-172.17.0.2-1731961491804/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:53,224 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data3/current/BP-899648378-172.17.0.2-1731961491804/current, will proceed with Du for space computation calculation, 2024-11-18T20:24:53,252 WARN [Thread-2489 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:24:53,270 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb9121e4504cf5c2 with lease ID 0xd876379f87b48b87: Processing first storage report for DS-517d1924-ed8f-49cf-a4ea-11eab5a81a4f from datanode DatanodeRegistration(127.0.0.1:35841, datanodeUuid=f8452cf6-8861-4b31-9daf-1048da2ca9a6, infoPort=38405, infoSecurePort=0, ipcPort=43291, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804) 2024-11-18T20:24:53,270 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb9121e4504cf5c2 with lease ID 0xd876379f87b48b87: from storage DS-517d1924-ed8f-49cf-a4ea-11eab5a81a4f node DatanodeRegistration(127.0.0.1:35841, datanodeUuid=f8452cf6-8861-4b31-9daf-1048da2ca9a6, infoPort=38405, infoSecurePort=0, ipcPort=43291, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:53,270 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb9121e4504cf5c2 with lease ID 0xd876379f87b48b87: Processing first storage report for DS-08d3801b-9588-460b-8907-1c86b2c47473 from datanode DatanodeRegistration(127.0.0.1:35841, datanodeUuid=f8452cf6-8861-4b31-9daf-1048da2ca9a6, infoPort=38405, infoSecurePort=0, ipcPort=43291, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804) 2024-11-18T20:24:53,270 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb9121e4504cf5c2 with lease ID 0xd876379f87b48b87: from storage DS-08d3801b-9588-460b-8907-1c86b2c47473 node DatanodeRegistration(127.0.0.1:35841, datanodeUuid=f8452cf6-8861-4b31-9daf-1048da2ca9a6, infoPort=38405, infoSecurePort=0, ipcPort=43291, storageInfo=lv=-57;cid=testClusterID;nsid=1733061468;c=1731961491804), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:24:53,337 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674 2024-11-18T20:24:53,340 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/zookeeper_0, clientPort=56351, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:24:53,341 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56351 2024-11-18T20:24:53,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:53,343 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:53,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:24:53,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:24:53,354 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653 with version=8 2024-11-18T20:24:53,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37701/user/jenkins/test-data/a71a2cbc-5eb7-ba03-2b03-5838d283b578/hbase-staging 2024-11-18T20:24:53,356 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:24:53,356 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:53,356 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:53,356 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:24:53,356 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:53,356 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:24:53,357 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:24:53,357 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:24:53,357 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44199 2024-11-18T20:24:53,358 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44199 connecting to ZooKeeper ensemble=127.0.0.1:56351 2024-11-18T20:24:53,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441990x0, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-18T20:24:53,403 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44199-0x10150ccdd040000 connected 2024-11-18T20:24:53,464 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:53,465 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:53,468 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:53,468 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653, hbase.cluster.distributed=false 2024-11-18T20:24:53,469 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:24:53,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44199 2024-11-18T20:24:53,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44199 2024-11-18T20:24:53,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44199 2024-11-18T20:24:53,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44199 2024-11-18T20:24:53,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44199 2024-11-18T20:24:53,490 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5a964fc427ed:0 server-side Connection retries=45 2024-11-18T20:24:53,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:53,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:53,490 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:24:53,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:24:53,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:24:53,490 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:24:53,491 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:24:53,491 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38013 2024-11-18T20:24:53,492 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38013 connecting to ZooKeeper ensemble=127.0.0.1:56351 2024-11-18T20:24:53,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:53,495 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:53,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380130x0, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:24:53,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:380130x0, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:53,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38013-0x10150ccdd040001 connected 2024-11-18T20:24:53,506 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:24:53,507 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:24:53,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:24:53,508 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:24:53,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38013 2024-11-18T20:24:53,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38013 2024-11-18T20:24:53,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38013 2024-11-18T20:24:53,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38013 2024-11-18T20:24:53,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38013 2024-11-18T20:24:53,523 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5a964fc427ed:44199 2024-11-18T20:24:53,524 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5a964fc427ed,44199,1731961493356 2024-11-18T20:24:53,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-18T20:24:53,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:24:53,530 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5a964fc427ed,44199,1731961493356 2024-11-18T20:24:53,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:24:53,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,539 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:24:53,540 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5a964fc427ed,44199,1731961493356 from backup master directory 2024-11-18T20:24:53,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5a964fc427ed,44199,1731961493356 2024-11-18T20:24:53,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:24:53,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:24:53,547 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
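The ZKWatcher lines above show the master and region-server sessions connecting to the ensemble at 127.0.0.1:56351 and setting watches on znodes under /hbase that may not exist yet (/hbase/running, /hbase/acl, /hbase/master). A minimal sketch of that same watch-before-create pattern with the plain ZooKeeper client follows; the connect string, session timeout, and znode path are taken from this log, everything else is illustrative and not the actual ZKUtil/ZKWatcher code.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);

        // Session against the ensemble the test started (client port 56351 in this log).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56351", 40000, (WatchedEvent event) -> {
            // Mirrors the "Received ZooKeeper Event, type=None, state=SyncConnected" lines.
            System.out.println("event type=" + event.getType()
                + " state=" + event.getState() + " path=" + event.getPath());
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        // Like the "Set watcher on znode that does not yet exist" lines: exists() returns
        // null when the node is absent but still registers the watch, so a later
        // NodeCreated event for /hbase/running is still delivered to the watcher above.
        if (zk.exists("/hbase/running", true) == null) {
            System.out.println("/hbase/running not created yet; watch registered");
        }

        zk.close();
    }
}
```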
2024-11-18T20:24:53,547 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5a964fc427ed,44199,1731961493356 2024-11-18T20:24:53,552 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/hbase.id] with ID: 65c78903-636b-4c4b-8a94-22e68dccd462 2024-11-18T20:24:53,552 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/.tmp/hbase.id 2024-11-18T20:24:53,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:24:53,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:24:53,558 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/.tmp/hbase.id]:[hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/hbase.id] 2024-11-18T20:24:53,569 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:53,569 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:24:53,571 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
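The FSUtils lines just above write the cluster ID to a temporary file under .tmp and then move it to its final location, so a reader never observes a half-written hbase.id. A rough sketch of that write-then-rename idiom with the Hadoop FileSystem API is below; the paths and the helper method are illustrative, not the actual FSUtils implementation.

```java
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
    // Write the content to a temp file first, then rename it into place so the final
    // path only ever appears fully written (the .tmp/hbase.id -> hbase.id move in the log).
    static void writeAtomically(FileSystem fs, Path finalPath, Path tmpPath, String content)
            throws java.io.IOException {
        try (FSDataOutputStream out = fs.create(tmpPath, true)) {
            out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmpPath, finalPath)) {
            throw new java.io.IOException("rename " + tmpPath + " -> " + finalPath + " failed");
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the log; the directory layout here is illustrative.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43753"), conf);
        Path root = new Path("/user/jenkins/test-data/example");
        writeAtomically(fs, new Path(root, "hbase.id"),
            new Path(root, ".tmp/hbase.id"), "65c78903-636b-4c4b-8a94-22e68dccd462");
        fs.close();
    }
}
```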
2024-11-18T20:24:53,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:24:53,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:24:53,587 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:24:53,588 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:24:53,588 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:24:53,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:24:53,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:24:53,599 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store 2024-11-18T20:24:53,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:24:53,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:24:53,607 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:53,607 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:24:53,607 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:53,607 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:53,607 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:24:53,607 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:53,607 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
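The MasterRegion lines above create (and immediately close) the local 'master:store' table, whose 'info' family keeps three versions, uses ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory caching, and an 8 KB block size. For comparison, this is roughly how those column-family attributes would be expressed with the public descriptor builders; the table name and the second family here are purely illustrative.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static void main(String[] args) {
        // Attributes copied from the 'info' family printed in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // The 'proc' family keeps the simpler settings shown in the log
        // (1 version, ROW bloom filter, default 64 KB blocks).
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store"))  // illustrative name
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();

        System.out.println(td);
    }
}
```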
2024-11-18T20:24:53,607 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961493607Disabling compacts and flushes for region at 1731961493607Disabling writes for close at 1731961493607Writing region close event to WAL at 1731961493607Closed at 1731961493607 2024-11-18T20:24:53,608 WARN [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/.initializing 2024-11-18T20:24:53,608 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/WALs/5a964fc427ed,44199,1731961493356 2024-11-18T20:24:53,610 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C44199%2C1731961493356, suffix=, logDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/WALs/5a964fc427ed,44199,1731961493356, archiveDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/oldWALs, maxLogs=10 2024-11-18T20:24:53,610 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C44199%2C1731961493356.1731961493610 2024-11-18T20:24:53,615 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/WALs/5a964fc427ed,44199,1731961493356/5a964fc427ed%2C44199%2C1731961493356.1731961493610 2024-11-18T20:24:53,621 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38405:38405),(127.0.0.1/127.0.0.1:33451:33451)] 2024-11-18T20:24:53,622 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:24:53,622 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:53,622 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,622 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:24:53,625 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:53,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:24:53,626 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:53,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:24:53,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:53,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:24:53,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:24:53,630 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,631 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,631 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,632 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,633 DEBUG [master/5a964fc427ed:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,633 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:24:53,634 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:24:53,637 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:24:53,637 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873178, jitterRate=0.11030378937721252}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:24:53,638 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961493622Initializing all the Stores at 1731961493623 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961493623Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961493623Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961493623Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961493623Cleaning up temporary data from old regions at 1731961493633 (+10 ms)Region opened successfully at 1731961493638 (+5 ms) 2024-11-18T20:24:53,638 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:24:53,642 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65ff23c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:24:53,643 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:24:53,643 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:24:53,643 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:24:53,643 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:24:53,644 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:24:53,644 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:24:53,644 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:24:53,646 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:24:53,647 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:24:53,655 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:24:53,655 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:24:53,656 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:24:53,663 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:24:53,664 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:24:53,665 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:24:53,672 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:24:53,673 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:24:53,680 DEBUG 
[master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:24:53,683 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:24:53,688 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:24:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,698 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5a964fc427ed,44199,1731961493356, sessionid=0x10150ccdd040000, setting cluster-up flag (Was=false) 2024-11-18T20:24:53,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,739 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:24:53,740 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,44199,1731961493356 2024-11-18T20:24:53,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:53,780 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:24:53,781 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5a964fc427ed,44199,1731961493356 2024-11-18T20:24:53,782 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:24:53,784 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:53,784 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:24:53,784 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:24:53,784 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5a964fc427ed,44199,1731961493356 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:24:53,785 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:53,786 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:53,786 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:53,786 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5a964fc427ed:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:24:53,786 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5a964fc427ed:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:24:53,786 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,786 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:24:53,786 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5a964fc427ed:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:24:53,786 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961523786 2024-11-18T20:24:53,786 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:24:53,787 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:24:53,787 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:24:53,787 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:24:53,788 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:24:53,788 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:24:53,788 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961493788,5,FailOnTimeoutGroup] 2024-11-18T20:24:53,788 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961493788,5,FailOnTimeoutGroup] 2024-11-18T20:24:53,788 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,788 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:24:53,788 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,788 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,788 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,789 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:24:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:24:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:24:53,798 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:24:53,799 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653 2024-11-18T20:24:53,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:24:53,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:24:53,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:53,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:24:53,809 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:24:53,809 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:53,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:24:53,810 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:24:53,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,811 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(746): ClusterId : 65c78903-636b-4c4b-8a94-22e68dccd462 2024-11-18T20:24:53,811 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:24:53,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:53,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:24:53,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:24:53,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:53,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:24:53,814 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:24:53,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:53,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:53,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:24:53,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740 2024-11-18T20:24:53,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740 2024-11-18T20:24:53,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:24:53,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:24:53,817 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
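Each CompactionConfiguration line above prints the effective compaction tuning for one column family: a 128 MB minimum compact size, between 3 and 10 files per minor compaction, a 1.2 ratio (5.0 off-peak), and a weekly major compaction with 0.5 jitter. Those values correspond to standard hbase-site.xml keys; the sketch below sets the most common ones programmatically, using the numbers echoed in this log (this is a hedged illustration of the key names, not code from the test itself).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // files [minFilesToCompact:3, maxFilesToCompact:10) in the log
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);

        // ratio 1.200000; off-peak ratio 5.000000
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

        // minCompactSize: 128 MB
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);

        // major period 604800000 ms (7 days), major jitter 0.5
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```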
2024-11-18T20:24:53,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:24:53,819 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:24:53,820 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744173, jitterRate=-0.053736090660095215}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:24:53,820 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:24:53,820 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:24:53,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961493807Initializing all the Stores at 1731961493807Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961493807Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961493808 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961493808Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961493808Cleaning up temporary data from old regions at 1731961493816 (+8 ms)Region opened successfully at 1731961493820 (+4 ms) 2024-11-18T20:24:53,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:24:53,821 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:24:53,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:24:53,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:24:53,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:24:53,823 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-18T20:24:53,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961493821Disabling compacts and flushes for region at 1731961493821Disabling writes for close at 1731961493821Writing region close event to WAL at 1731961493823 (+2 ms)Closed at 1731961493823 2024-11-18T20:24:53,824 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:53,824 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:24:53,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:24:53,825 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:24:53,826 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:24:53,831 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:24:53,831 DEBUG [RS:0;5a964fc427ed:38013 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b6d1c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5a964fc427ed/172.17.0.2:0 2024-11-18T20:24:53,844 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5a964fc427ed:38013 2024-11-18T20:24:53,844 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:24:53,844 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:24:53,844 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T20:24:53,845 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(2659): reportForDuty to master=5a964fc427ed,44199,1731961493356 with port=38013, startcode=1731961493490 2024-11-18T20:24:53,845 DEBUG [RS:0;5a964fc427ed:38013 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:24:53,847 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58991, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:24:53,848 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44199 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5a964fc427ed,38013,1731961493490 2024-11-18T20:24:53,848 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44199 {}] master.ServerManager(517): Registering regionserver=5a964fc427ed,38013,1731961493490 2024-11-18T20:24:53,849 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653 2024-11-18T20:24:53,849 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43753 2024-11-18T20:24:53,849 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:24:53,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:24:53,855 DEBUG [RS:0;5a964fc427ed:38013 {}] zookeeper.ZKUtil(111): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5a964fc427ed,38013,1731961493490 2024-11-18T20:24:53,856 WARN [RS:0;5a964fc427ed:38013 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:24:53,856 INFO [RS:0;5a964fc427ed:38013 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:24:53,856 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/5a964fc427ed,38013,1731961493490 2024-11-18T20:24:53,867 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5a964fc427ed,38013,1731961493490] 2024-11-18T20:24:53,875 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:24:53,876 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:24:53,876 INFO [RS:0;5a964fc427ed:38013 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:24:53,876 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T20:24:53,876 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:24:53,877 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:24:53,877 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:24:53,877 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5a964fc427ed:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5a964fc427ed:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:24:53,878 DEBUG [RS:0;5a964fc427ed:38013 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5a964fc427ed:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:24:53,878 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,878 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,878 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,878 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-18T20:24:53,878 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,878 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,38013,1731961493490-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:24:53,894 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:24:53,894 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,38013,1731961493490-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,894 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,894 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.Replication(171): 5a964fc427ed,38013,1731961493490 started 2024-11-18T20:24:53,910 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:53,910 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1482): Serving as 5a964fc427ed,38013,1731961493490, RpcServer on 5a964fc427ed/172.17.0.2:38013, sessionid=0x10150ccdd040001 2024-11-18T20:24:53,910 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:24:53,910 DEBUG [RS:0;5a964fc427ed:38013 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5a964fc427ed,38013,1731961493490 2024-11-18T20:24:53,910 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,38013,1731961493490' 2024-11-18T20:24:53,910 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:24:53,911 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:24:53,911 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:24:53,911 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:24:53,911 DEBUG [RS:0;5a964fc427ed:38013 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5a964fc427ed,38013,1731961493490 2024-11-18T20:24:53,911 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5a964fc427ed,38013,1731961493490' 2024-11-18T20:24:53,911 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:24:53,911 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:24:53,912 DEBUG [RS:0;5a964fc427ed:38013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:24:53,912 INFO [RS:0;5a964fc427ed:38013 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:24:53,912 INFO [RS:0;5a964fc427ed:38013 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-18T20:24:53,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:53,976 WARN [5a964fc427ed:44199 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-18T20:24:54,013 INFO [RS:0;5a964fc427ed:38013 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C38013%2C1731961493490, suffix=, logDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/5a964fc427ed,38013,1731961493490, archiveDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/oldWALs, maxLogs=32 2024-11-18T20:24:54,014 INFO [RS:0;5a964fc427ed:38013 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38013%2C1731961493490.1731961494014 2024-11-18T20:24:54,019 INFO [RS:0;5a964fc427ed:38013 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/5a964fc427ed,38013,1731961493490/5a964fc427ed%2C38013%2C1731961493490.1731961494014 2024-11-18T20:24:54,020 DEBUG [RS:0;5a964fc427ed:38013 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33451:33451),(127.0.0.1/127.0.0.1:38405:38405)] 2024-11-18T20:24:54,227 DEBUG [5a964fc427ed:44199 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:24:54,227 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5a964fc427ed,38013,1731961493490 2024-11-18T20:24:54,228 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,38013,1731961493490, state=OPENING 2024-11-18T20:24:54,238 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:24:54,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:54,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:54,247 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:54,247 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:24:54,247 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:54,247 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,38013,1731961493490}] 2024-11-18T20:24:54,400 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:24:54,402 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35513, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:24:54,406 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:24:54,406 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:24:54,407 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5a964fc427ed%2C38013%2C1731961493490.meta, suffix=.meta, logDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/5a964fc427ed,38013,1731961493490, archiveDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/oldWALs, maxLogs=32 2024-11-18T20:24:54,408 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5a964fc427ed%2C38013%2C1731961493490.meta.1731961494408.meta 2024-11-18T20:24:54,414 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/5a964fc427ed,38013,1731961493490/5a964fc427ed%2C38013%2C1731961493490.meta.1731961494408.meta 2024-11-18T20:24:54,415 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33451:33451),(127.0.0.1/127.0.0.1:38405:38405)] 2024-11-18T20:24:54,416 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:24:54,416 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:24:54,416 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:24:54,416 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T20:24:54,416 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:24:54,416 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:24:54,416 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:24:54,416 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:24:54,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:24:54,419 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:24:54,419 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:54,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:54,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:24:54,420 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:24:54,420 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:54,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:54,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:24:54,421 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:24:54,421 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:54,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:24:54,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:24:54,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:24:54,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:24:54,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T20:24:54,422 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:24:54,423 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740 2024-11-18T20:24:54,424 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740 2024-11-18T20:24:54,425 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:24:54,425 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:24:54,426 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:24:54,427 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:24:54,428 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765883, jitterRate=-0.02613021433353424}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:24:54,428 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:24:54,429 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961494417Writing region info on filesystem at 1731961494417Initializing all the Stores at 1731961494417Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961494417Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961494418 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961494418Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961494418Cleaning up temporary data from old regions at 1731961494425 (+7 ms)Running coprocessor post-open hooks at 1731961494428 (+3 ms)Region opened successfully at 1731961494429 (+1 ms) 2024-11-18T20:24:54,430 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961494400 2024-11-18T20:24:54,432 DEBUG [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:24:54,432 INFO [RS_OPEN_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:24:54,432 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5a964fc427ed,38013,1731961493490 2024-11-18T20:24:54,433 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5a964fc427ed,38013,1731961493490, state=OPEN 2024-11-18T20:24:54,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:24:54,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:24:54,457 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:54,457 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5a964fc427ed,38013,1731961493490 2024-11-18T20:24:54,457 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:24:54,459 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:24:54,459 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5a964fc427ed,38013,1731961493490 in 210 msec 2024-11-18T20:24:54,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:24:54,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 635 msec 2024-11-18T20:24:54,462 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:24:54,462 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:24:54,463 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:24:54,463 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,38013,1731961493490, seqNum=-1] 2024-11-18T20:24:54,463 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:24:54,465 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50903, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:24:54,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 685 msec 2024-11-18T20:24:54,469 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961494469, completionTime=-1 2024-11-18T20:24:54,469 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:24:54,470 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961554472 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961614472 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44199,1731961493356-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44199,1731961493356-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44199,1731961493356-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5a964fc427ed:44199, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:54,472 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:24:54,474 DEBUG [master/5a964fc427ed:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:24:54,476 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.929sec 2024-11-18T20:24:54,477 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:24:54,477 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:24:54,477 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:24:54,477 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:24:54,477 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:24:54,477 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44199,1731961493356-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:24:54,477 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44199,1731961493356-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:24:54,479 DEBUG [master/5a964fc427ed:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:24:54,479 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:24:54,479 INFO [master/5a964fc427ed:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5a964fc427ed,44199,1731961493356-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:24:54,512 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44ac6377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:24:54,512 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5a964fc427ed,44199,-1 for getting cluster id 2024-11-18T20:24:54,512 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:24:54,513 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '65c78903-636b-4c4b-8a94-22e68dccd462' 2024-11-18T20:24:54,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:24:54,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "65c78903-636b-4c4b-8a94-22e68dccd462" 2024-11-18T20:24:54,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4df37720, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:24:54,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5a964fc427ed,44199,-1] 2024-11-18T20:24:54,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:24:54,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:54,516 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59132, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:24:54,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4457fb1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:24:54,516 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:24:54,517 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5a964fc427ed,38013,1731961493490, seqNum=-1] 2024-11-18T20:24:54,518 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:24:54,519 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52874, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:24:54,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5a964fc427ed,44199,1731961493356 2024-11-18T20:24:54,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:24:54,524 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:24:54,524 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:24:54,525 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/test.com,8080,1, archiveDir=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/oldWALs, maxLogs=32 2024-11-18T20:24:54,526 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731961494526 2024-11-18T20:24:54,539 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961494526 2024-11-18T20:24:54,542 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33451:33451),(127.0.0.1/127.0.0.1:38405:38405)] 2024-11-18T20:24:54,543 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731961494543 2024-11-18T20:24:54,547 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,547 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,547 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,548 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,548 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,548 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961494526 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961494543 2024-11-18T20:24:54,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38405:38405),(127.0.0.1/127.0.0.1:33451:33451)] 2024-11-18T20:24:54,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961494526 is not closed yet, will try archiving it next time 2024-11-18T20:24:54,549 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,549 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741835_1011 (size=93) 2024-11-18T20:24:54,549 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,549 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,549 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741835_1011 (size=93) 2024-11-18T20:24:54,550 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961494526 to hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/oldWALs/test.com%2C8080%2C1.1731961494526 2024-11-18T20:24:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741836_1012 (size=93) 2024-11-18T20:24:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741836_1012 (size=93) 2024-11-18T20:24:54,553 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/oldWALs 2024-11-18T20:24:54,553 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731961494543) 2024-11-18T20:24:54,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:24:54,553 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:24:54,553 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:24:54,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:54,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:54,554 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T20:24:54,554 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:24:54,554 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=157014924, stopped=false 2024-11-18T20:24:54,554 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5a964fc427ed,44199,1731961493356 2024-11-18T20:24:54,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:54,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:54,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:24:54,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:54,564 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:24:54,564 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:24:54,564 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:24:54,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:54,564 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5a964fc427ed,38013,1731961493490' ***** 2024-11-18T20:24:54,564 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:24:54,564 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:24:54,564 INFO [RS:0;5a964fc427ed:38013 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:24:54,564 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:24:54,564 INFO [RS:0;5a964fc427ed:38013 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:24:54,564 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(959): stopping server 5a964fc427ed,38013,1731961493490 2024-11-18T20:24:54,565 INFO [RS:0;5a964fc427ed:38013 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:24:54,565 INFO [RS:0;5a964fc427ed:38013 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5a964fc427ed:38013. 2024-11-18T20:24:54,565 DEBUG [RS:0;5a964fc427ed:38013 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:24:54,565 DEBUG [RS:0;5a964fc427ed:38013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:54,565 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:54,565 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:24:54,565 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:24:54,565 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:24:54,565 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T20:24:54,565 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:24:54,565 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T20:24:54,565 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:24:54,565 DEBUG [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:24:54,565 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:24:54,565 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:24:54,565 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:24:54,565 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:24:54,565 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:24:54,566 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T20:24:54,582 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740/.tmp/ns/20a198763f234462884973f3d9211e51 is 43, key is default/ns:d/1731961494465/Put/seqid=0 2024-11-18T20:24:54,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741837_1013 (size=5153) 2024-11-18T20:24:54,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741837_1013 (size=5153) 2024-11-18T20:24:54,587 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740/.tmp/ns/20a198763f234462884973f3d9211e51 2024-11-18T20:24:54,593 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740/.tmp/ns/20a198763f234462884973f3d9211e51 as hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740/ns/20a198763f234462884973f3d9211e51 2024-11-18T20:24:54,598 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740/ns/20a198763f234462884973f3d9211e51, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T20:24:54,599 INFO 
[RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false 2024-11-18T20:24:54,603 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T20:24:54,603 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:24:54,604 INFO [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:24:54,604 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961494565Running coprocessor pre-close hooks at 1731961494565Disabling compacts and flushes for region at 1731961494565Disabling writes for close at 1731961494565Obtaining lock to block concurrent updates at 1731961494566 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731961494566Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731961494566Flushing stores of hbase:meta,,1.1588230740 at 1731961494566Flushing 1588230740/ns: creating writer at 1731961494567 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731961494582 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731961494582Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fc8ec8c: reopening flushed file at 1731961494592 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false at 1731961494599 (+7 ms)Writing region close event to WAL at 1731961494599Running coprocessor post-close hooks at 1731961494603 (+4 ms)Closed at 1731961494604 (+1 ms) 2024-11-18T20:24:54,604 DEBUG [RS_CLOSE_META-regionserver/5a964fc427ed:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:24:54,765 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(976): stopping server 5a964fc427ed,38013,1731961493490; all regions closed. 
2024-11-18T20:24:54,766 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,767 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,767 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:24:54,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:24:54,771 DEBUG [RS:0;5a964fc427ed:38013 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/oldWALs 2024-11-18T20:24:54,771 INFO [RS:0;5a964fc427ed:38013 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C38013%2C1731961493490.meta:.meta(num 1731961494408) 2024-11-18T20:24:54,772 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,772 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,772 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,772 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,772 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:24:54,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:24:54,776 DEBUG [RS:0;5a964fc427ed:38013 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/oldWALs 2024-11-18T20:24:54,776 INFO [RS:0;5a964fc427ed:38013 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5a964fc427ed%2C38013%2C1731961493490:(num 1731961494014) 2024-11-18T20:24:54,776 DEBUG [RS:0;5a964fc427ed:38013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:24:54,776 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:24:54,776 INFO [RS:0;5a964fc427ed:38013 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:24:54,776 INFO [RS:0;5a964fc427ed:38013 {}] hbase.ChoreService(370): Chore service for: regionserver/5a964fc427ed:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:24:54,776 INFO [RS:0;5a964fc427ed:38013 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:24:54,776 INFO [regionserver/5a964fc427ed:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:24:54,777 INFO [RS:0;5a964fc427ed:38013 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38013 2024-11-18T20:24:54,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5a964fc427ed,38013,1731961493490 2024-11-18T20:24:54,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:24:54,786 INFO [RS:0;5a964fc427ed:38013 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:24:54,795 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5a964fc427ed,38013,1731961493490] 2024-11-18T20:24:54,803 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5a964fc427ed,38013,1731961493490 already deleted, retry=false 2024-11-18T20:24:54,803 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5a964fc427ed,38013,1731961493490 expired; onlineServers=0 2024-11-18T20:24:54,803 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5a964fc427ed,44199,1731961493356' ***** 2024-11-18T20:24:54,803 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:24:54,803 INFO [M:0;5a964fc427ed:44199 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:24:54,803 INFO [M:0;5a964fc427ed:44199 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:24:54,803 DEBUG [M:0;5a964fc427ed:44199 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:24:54,803 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:24:54,803 DEBUG [M:0;5a964fc427ed:44199 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:24:54,803 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961493788 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.large.0-1731961493788,5,FailOnTimeoutGroup] 2024-11-18T20:24:54,803 DEBUG [master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961493788 {}] cleaner.HFileCleaner(306): Exit Thread[master/5a964fc427ed:0:becomeActiveMaster-HFileCleaner.small.0-1731961493788,5,FailOnTimeoutGroup] 2024-11-18T20:24:54,803 INFO [M:0;5a964fc427ed:44199 {}] hbase.ChoreService(370): Chore service for: master/5a964fc427ed:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:24:54,803 INFO [M:0;5a964fc427ed:44199 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:24:54,803 DEBUG [M:0;5a964fc427ed:44199 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:24:54,804 INFO [M:0;5a964fc427ed:44199 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:24:54,804 INFO [M:0;5a964fc427ed:44199 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:24:54,804 INFO [M:0;5a964fc427ed:44199 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:24:54,804 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:24:54,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:24:54,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:24:54,813 DEBUG [M:0;5a964fc427ed:44199 {}] zookeeper.ZKUtil(347): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:24:54,814 WARN [M:0;5a964fc427ed:44199 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:24:54,814 INFO [M:0;5a964fc427ed:44199 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/.lastflushedseqids 2024-11-18T20:24:54,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741838_1014 (size=99) 2024-11-18T20:24:54,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741838_1014 (size=99) 2024-11-18T20:24:54,820 INFO [M:0;5a964fc427ed:44199 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:24:54,820 INFO [M:0;5a964fc427ed:44199 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:24:54,820 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:24:54,820 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:54,821 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:54,821 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:24:54,821 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:54,821 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T20:24:54,839 DEBUG [M:0;5a964fc427ed:44199 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1590f96c2a424e00b2a5b0555bb6c131 is 82, key is hbase:meta,,1/info:regioninfo/1731961494432/Put/seqid=0 2024-11-18T20:24:54,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741839_1015 (size=5672) 2024-11-18T20:24:54,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741839_1015 (size=5672) 2024-11-18T20:24:54,843 INFO [M:0;5a964fc427ed:44199 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1590f96c2a424e00b2a5b0555bb6c131 2024-11-18T20:24:54,868 DEBUG [M:0;5a964fc427ed:44199 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9fa1d4d784e14c189068d27cd86552e6 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731961494469/Put/seqid=0 2024-11-18T20:24:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741840_1016 (size=5275) 2024-11-18T20:24:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741840_1016 (size=5275) 2024-11-18T20:24:54,873 INFO [M:0;5a964fc427ed:44199 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9fa1d4d784e14c189068d27cd86552e6 2024-11-18T20:24:54,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,35553,1731961316245/5a964fc427ed%2C35553%2C1731961316245.1731961316487 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:54,893 DEBUG [M:0;5a964fc427ed:44199 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6bc0ec37de984ed79f294e7beb5395b2 is 69, key is 5a964fc427ed,38013,1731961493490/rs:state/1731961493848/Put/seqid=0 2024-11-18T20:24:54,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:54,895 INFO [RS:0;5a964fc427ed:38013 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:24:54,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38013-0x10150ccdd040001, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:54,895 INFO [RS:0;5a964fc427ed:38013 {}] regionserver.HRegionServer(1031): Exiting; stopping=5a964fc427ed,38013,1731961493490; zookeeper connection closed. 
2024-11-18T20:24:54,895 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@38b26971 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@38b26971 2024-11-18T20:24:54,895 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:24:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741841_1017 (size=5156) 2024-11-18T20:24:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741841_1017 (size=5156) 2024-11-18T20:24:54,898 INFO [M:0;5a964fc427ed:44199 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6bc0ec37de984ed79f294e7beb5395b2 2024-11-18T20:24:54,919 DEBUG [M:0;5a964fc427ed:44199 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6cf8d98575044d32a4aad9fa1e345ed9 is 52, key is load_balancer_on/state:d/1731961494523/Put/seqid=0 2024-11-18T20:24:54,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33409/user/jenkins/test-data/f70036a4-e7e4-cc42-18f5-f2775d340abf/WALs/5a964fc427ed,38299,1731961314990/5a964fc427ed%2C38299%2C1731961314990.meta.1731961316104.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:24:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741842_1018 (size=5056) 2024-11-18T20:24:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741842_1018 (size=5056) 2024-11-18T20:24:54,924 INFO [M:0;5a964fc427ed:44199 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6cf8d98575044d32a4aad9fa1e345ed9 2024-11-18T20:24:54,928 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1590f96c2a424e00b2a5b0555bb6c131 as hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1590f96c2a424e00b2a5b0555bb6c131 2024-11-18T20:24:54,933 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1590f96c2a424e00b2a5b0555bb6c131, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T20:24:54,934 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9fa1d4d784e14c189068d27cd86552e6 as hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9fa1d4d784e14c189068d27cd86552e6 2024-11-18T20:24:54,938 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9fa1d4d784e14c189068d27cd86552e6, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T20:24:54,939 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6bc0ec37de984ed79f294e7beb5395b2 as hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6bc0ec37de984ed79f294e7beb5395b2 2024-11-18T20:24:54,944 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6bc0ec37de984ed79f294e7beb5395b2, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T20:24:54,945 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6cf8d98575044d32a4aad9fa1e345ed9 as hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6cf8d98575044d32a4aad9fa1e345ed9 2024-11-18T20:24:54,949 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43753/user/jenkins/test-data/f64ddf88-6572-13f6-7ea0-7fc4085e9653/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6cf8d98575044d32a4aad9fa1e345ed9, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T20:24:54,950 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=29, compaction requested=false 2024-11-18T20:24:54,951 INFO [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:24:54,951 DEBUG [M:0;5a964fc427ed:44199 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961494820Disabling compacts and flushes for region at 1731961494820Disabling writes for close at 1731961494821 (+1 ms)Obtaining lock to block concurrent updates at 1731961494821Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961494821Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731961494821Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731961494822 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961494822Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961494839 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961494839Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961494848 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961494868 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961494868Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961494877 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961494893 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961494893Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961494902 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961494918 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961494918Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71a98ccd: reopening flushed file at 1731961494927 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f633992: reopening flushed file at 1731961494933 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e5277d1: reopening flushed file at 1731961494938 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35b528c7: reopening flushed file at 1731961494944 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=29, compaction requested=false at 1731961494950 (+6 ms)Writing region close event to WAL at 1731961494951 (+1 ms)Closed at 1731961494951 2024-11-18T20:24:54,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,952 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,952 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,952 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,952 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:24:54,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39761 is added to blk_1073741830_1006 (size=10311) 2024-11-18T20:24:54,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741830_1006 (size=10311) 2024-11-18T20:24:54,954 INFO [M:0;5a964fc427ed:44199 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:24:54,954 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:24:54,954 INFO [M:0;5a964fc427ed:44199 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44199 2024-11-18T20:24:54,955 INFO [M:0;5a964fc427ed:44199 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:24:55,064 INFO [M:0;5a964fc427ed:44199 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:24:55,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:55,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44199-0x10150ccdd040000, quorum=127.0.0.1:56351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:24:55,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5991282a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:55,072 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73bc7eb2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:55,072 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:55,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6df20715{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:55,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2269c58e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:55,073 WARN [BP-899648378-172.17.0.2-1731961491804 heartbeating to localhost/127.0.0.1:43753 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:24:55,073 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:24:55,073 WARN [BP-899648378-172.17.0.2-1731961491804 heartbeating to localhost/127.0.0.1:43753 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-899648378-172.17.0.2-1731961491804 (Datanode Uuid f8452cf6-8861-4b31-9daf-1048da2ca9a6) service to localhost/127.0.0.1:43753 2024-11-18T20:24:55,073 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:24:55,074 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data3/current/BP-899648378-172.17.0.2-1731961491804 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:55,074 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data4/current/BP-899648378-172.17.0.2-1731961491804 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:55,075 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:24:55,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68813b82{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:24:55,077 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@509dedeb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:55,077 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:55,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1773ea07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:55,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5eefcd14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:55,079 WARN [BP-899648378-172.17.0.2-1731961491804 heartbeating to localhost/127.0.0.1:43753 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:24:55,079 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:24:55,079 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:24:55,079 WARN [BP-899648378-172.17.0.2-1731961491804 heartbeating to localhost/127.0.0.1:43753 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-899648378-172.17.0.2-1731961491804 (Datanode Uuid 60ca41f1-fc84-4a0f-b120-bf1a0d68653e) service to localhost/127.0.0.1:43753 2024-11-18T20:24:55,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data1/current/BP-899648378-172.17.0.2-1731961491804 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:55,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/cluster_34e920d6-3865-e44e-e2e2-76775664c6d5/data/data2/current/BP-899648378-172.17.0.2-1731961491804 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:24:55,079 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:24:55,084 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60fdf071{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:24:55,084 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3ffef2a8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:24:55,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:24:55,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@814e400{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:24:55,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@238bf9b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/66ef6e64-3a3b-38a0-b887-e8f053656674/hadoop.log.dir/,STOPPED} 2024-11-18T20:24:55,090 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:24:55,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:24:55,116 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 231) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43753 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43753 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43753 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43753 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43753 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43753
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:43753
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43753 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=536 (was 522) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=233 (was 218) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3216 (was 3233)