2024-11-24 08:46:50,279 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 08:46:50,296 main DEBUG Took 0.014978 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-24 08:46:50,297 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-24 08:46:50,298 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-24 08:46:50,299 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-24 08:46:50,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,309 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-24 08:46:50,326 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,328 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,329 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,329 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,330 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,330 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,331 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,331 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,332 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,332 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,333 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,333 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,334 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,334 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-24 08:46:50,335 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,335 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,336 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,336 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,337 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,337 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,338 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,338 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,339 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,340 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:46:50,340 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,341 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-24 08:46:50,342 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:46:50,343 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-24 08:46:50,345 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-24 08:46:50,346 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-24 08:46:50,347 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-24 08:46:50,348 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-24 08:46:50,357 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-24 08:46:50,360 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-24 08:46:50,362 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-24 08:46:50,362 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-24 08:46:50,363 main DEBUG createAppenders(={Console}) 2024-11-24 08:46:50,364 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-24 08:46:50,364 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 08:46:50,364 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-24 08:46:50,365 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-24 08:46:50,365 main DEBUG OutputStream closed 2024-11-24 08:46:50,365 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-24 08:46:50,365 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-24 08:46:50,366 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-24 08:46:50,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-24 08:46:50,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-24 08:46:50,456 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-24 08:46:50,457 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-24 08:46:50,458 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-24 08:46:50,458 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-24 08:46:50,458 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-24 08:46:50,458 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-24 08:46:50,459 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-24 08:46:50,459 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-24 08:46:50,459 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-24 08:46:50,460 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-24 08:46:50,460 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-24 08:46:50,460 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-24 08:46:50,460 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-24 08:46:50,461 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-24 08:46:50,461 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-24 08:46:50,462 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-24 08:46:50,464 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-24 08:46:50,464 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-24 08:46:50,464 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-24 08:46:50,465 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-24T08:46:50,745 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6 2024-11-24 08:46:50,748 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-24 08:46:50,749 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
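Note: the PropertiesConfiguration assembled in the lines above (per-package LoggerConfigs, a PatternLayout, and the HBaseTestAppender writing to SYSTEM_ERR) is loaded from the log4j2.properties bundled in the hbase-logging tests jar, as the "Reconfiguration complete" line shows. A rough sketch of what such a properties file looks like follows; this is illustrative only, not the actual file, and the appender type name and status level are assumptions inferred from the builder output above.

    # Illustrative log4j2.properties sketch (not the real file from the tests jar)
    status = debug

    # stderr appender with the pattern seen in the PatternLayout$Builder line above;
    # the type name "HBaseTestAppender" is an assumption based on the plugin class logged
    appender.console.type = HBaseTestAppender
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.maxSize = 1G
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # a few of the per-package levels built above
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG

    # root logger: INFO routed to the Console appender (matches levelAndRefs="INFO,Console")
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = Console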
2024-11-24T08:46:50,760 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-24T08:46:50,802 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=347, ProcessCount=11, AvailableMemoryMB=2770 2024-11-24T08:46:50,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:46:50,828 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e, deleteOnExit=true 2024-11-24T08:46:50,829 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:46:50,831 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/test.cache.data in system properties and HBase conf 2024-11-24T08:46:50,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:46:50,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:46:50,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:46:50,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:46:50,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:46:50,931 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-24T08:46:51,030 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T08:46:51,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:46:51,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:46:51,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:46:51,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:46:51,038 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:46:51,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:46:51,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:46:51,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:46:51,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:46:51,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:46:51,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:46:51,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:46:51,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:46:51,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:46:51,594 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:46:51,960 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-24T08:46:52,052 INFO [Time-limited test {}] log.Log(170): Logging initialized @2668ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-24T08:46:52,140 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:46:52,227 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:46:52,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:46:52,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:46:52,255 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:46:52,266 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:46:52,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:46:52,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:46:52,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f961078{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/java.io.tmpdir/jetty-localhost-40841-hadoop-hdfs-3_4_1-tests_jar-_-any-15294014340666599556/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:46:52,467 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:40841} 2024-11-24T08:46:52,468 INFO [Time-limited test {}] server.Server(415): Started @3085ms 2024-11-24T08:46:52,500 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:46:52,890 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:46:52,900 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:46:52,905 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:46:52,905 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:46:52,906 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:46:52,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@616d254c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:46:52,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198fe7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:46:53,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32c41a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/java.io.tmpdir/jetty-localhost-33309-hadoop-hdfs-3_4_1-tests_jar-_-any-1434595720008102874/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:46:53,035 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21c64e78{HTTP/1.1, (http/1.1)}{localhost:33309} 2024-11-24T08:46:53,036 INFO [Time-limited test {}] server.Server(415): Started @3652ms 2024-11-24T08:46:53,114 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:46:53,302 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:46:53,320 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:46:53,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:46:53,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:46:53,330 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:46:53,332 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1612a852{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:46:53,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e06ea5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:46:53,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78be0d39{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/java.io.tmpdir/jetty-localhost-39299-hadoop-hdfs-3_4_1-tests_jar-_-any-11494082276620112082/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:46:53,475 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@617aa169{HTTP/1.1, (http/1.1)}{localhost:39299} 2024-11-24T08:46:53,475 INFO [Time-limited test {}] server.Server(415): Started @4092ms 2024-11-24T08:46:53,478 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
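Note: the minicluster being started above (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}) is what drives the DFS, Jetty and DataNode startup that follows. A test typically requests that topology roughly as sketched below; the method names follow the public HBaseTestingUtil / StartMiniClusterOption builder API as commonly used, and the class wrapper is purely illustrative, not the actual TestLogRolling setup code.

    // Sketch: starting a mini cluster with the topology logged above (assumed API, not TestLogRolling's code).
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up DFS, ZooKeeper, one master and one region server
        try {
          // ... exercise the cluster through util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();    // stops everything and removes the test-data directories
        }
      }
    }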
2024-11-24T08:46:53,648 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data3/current/BP-291149696-172.17.0.2-1732438011710/current, will proceed with Du for space computation calculation, 2024-11-24T08:46:53,648 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data1/current/BP-291149696-172.17.0.2-1732438011710/current, will proceed with Du for space computation calculation, 2024-11-24T08:46:53,649 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data2/current/BP-291149696-172.17.0.2-1732438011710/current, will proceed with Du for space computation calculation, 2024-11-24T08:46:53,651 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data4/current/BP-291149696-172.17.0.2-1732438011710/current, will proceed with Du for space computation calculation, 2024-11-24T08:46:53,711 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:46:53,714 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:46:53,785 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa06cbb1e3c8162e6 with lease ID 0xca0293272b7b77f7: Processing first storage report for DS-32ed2431-95fd-4963-8db7-50cd1b935c5b from datanode DatanodeRegistration(127.0.0.1:38515, datanodeUuid=a8d1a49c-f448-4e92-a06f-c45124bf4596, infoPort=37427, infoSecurePort=0, ipcPort=45761, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710) 2024-11-24T08:46:53,787 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa06cbb1e3c8162e6 with lease ID 0xca0293272b7b77f7: from storage DS-32ed2431-95fd-4963-8db7-50cd1b935c5b node DatanodeRegistration(127.0.0.1:38515, datanodeUuid=a8d1a49c-f448-4e92-a06f-c45124bf4596, infoPort=37427, infoSecurePort=0, ipcPort=45761, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-24T08:46:53,788 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc871bc998222f299 with lease ID 0xca0293272b7b77f6: Processing first storage report for DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93 from datanode DatanodeRegistration(127.0.0.1:39953, datanodeUuid=ae1c979a-d3d3-4e8e-904b-2aa3bd1750ae, infoPort=45551, infoSecurePort=0, ipcPort=39883, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710) 2024-11-24T08:46:53,788 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc871bc998222f299 with lease ID 0xca0293272b7b77f6: from storage DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93 node DatanodeRegistration(127.0.0.1:39953, datanodeUuid=ae1c979a-d3d3-4e8e-904b-2aa3bd1750ae, infoPort=45551, infoSecurePort=0, ipcPort=39883, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:46:53,789 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa06cbb1e3c8162e6 with lease ID 0xca0293272b7b77f7: Processing first storage report for DS-5199936b-3477-4425-84c2-fbf5259d6e7d from datanode DatanodeRegistration(127.0.0.1:38515, datanodeUuid=a8d1a49c-f448-4e92-a06f-c45124bf4596, infoPort=37427, infoSecurePort=0, ipcPort=45761, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710) 2024-11-24T08:46:53,789 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa06cbb1e3c8162e6 with lease ID 0xca0293272b7b77f7: from storage DS-5199936b-3477-4425-84c2-fbf5259d6e7d node DatanodeRegistration(127.0.0.1:38515, datanodeUuid=a8d1a49c-f448-4e92-a06f-c45124bf4596, infoPort=37427, infoSecurePort=0, ipcPort=45761, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:46:53,789 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc871bc998222f299 with lease ID 0xca0293272b7b77f6: Processing first storage report for DS-cdbd3473-e977-4d97-b4b2-a8b4adff8dbb from datanode DatanodeRegistration(127.0.0.1:39953, datanodeUuid=ae1c979a-d3d3-4e8e-904b-2aa3bd1750ae, infoPort=45551, infoSecurePort=0, ipcPort=39883, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710) 2024-11-24T08:46:53,790 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xc871bc998222f299 with lease ID 0xca0293272b7b77f6: from storage DS-cdbd3473-e977-4d97-b4b2-a8b4adff8dbb node DatanodeRegistration(127.0.0.1:39953, datanodeUuid=ae1c979a-d3d3-4e8e-904b-2aa3bd1750ae, infoPort=45551, infoSecurePort=0, ipcPort=39883, storageInfo=lv=-57;cid=testClusterID;nsid=1519103569;c=1732438011710), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:46:53,936 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6 2024-11-24T08:46:54,021 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/zookeeper_0, clientPort=62562, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:46:54,031 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62562 2024-11-24T08:46:54,044 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:54,050 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:54,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:46:54,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:46:54,740 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9 with version=8 2024-11-24T08:46:54,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase-staging 2024-11-24T08:46:54,840 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-24T08:46:55,072 INFO [Time-limited test {}] client.ConnectionUtils(128): master/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:46:55,087 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:46:55,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:46:55,095 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:46:55,095 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:46:55,095 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:46:55,252 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:46:55,325 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-24T08:46:55,337 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-24T08:46:55,341 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:46:55,373 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 68860 (auto-detected) 2024-11-24T08:46:55,374 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-24T08:46:55,392 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37057 2024-11-24T08:46:55,413 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37057 connecting to ZooKeeper ensemble=127.0.0.1:62562 2024-11-24T08:46:55,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:370570x0, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:46:55,446 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37057-0x10070e9949f0000 connected 2024-11-24T08:46:55,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:55,479 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:55,488 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:46:55,492 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9, hbase.cluster.distributed=false 2024-11-24T08:46:55,522 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:46:55,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37057 
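Note: at this point the master's NettyRpcServer is bound to 172.17.0.2:37057 and registered with the ZooKeeper ensemble at 127.0.0.1:62562 (the MiniZooKeeperCluster client port started earlier). A client reaches a cluster like this through the ZooKeeper quorum settings alone; a minimal sketch using the standard client API, with the host and port taken from this particular run:

    // Sketch: connecting a client to the cluster via its ZooKeeper ensemble.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // MiniZooKeeperCluster host
        conf.setInt("hbase.zookeeper.property.clientPort", 62562);  // client port from the log above
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The client finds the active master through the /hbase znodes registered above.
          System.out.println("active master: " + admin.getClusterMetrics().getMasterName());
        }
      }
    }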
2024-11-24T08:46:55,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37057 2024-11-24T08:46:55,529 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37057 2024-11-24T08:46:55,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37057 2024-11-24T08:46:55,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37057 2024-11-24T08:46:55,678 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:46:55,681 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:46:55,681 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:46:55,682 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:46:55,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:46:55,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:46:55,686 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:46:55,690 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:46:55,691 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42615 2024-11-24T08:46:55,694 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42615 connecting to ZooKeeper ensemble=127.0.0.1:62562 2024-11-24T08:46:55,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:55,702 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:55,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426150x0, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:46:55,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426150x0, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:46:55,725 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:42615-0x10070e9949f0001 connected 2024-11-24T08:46:55,727 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:46:55,742 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:46:55,744 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:46:55,750 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:46:55,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42615 2024-11-24T08:46:55,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42615 2024-11-24T08:46:55,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42615 2024-11-24T08:46:55,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42615 2024-11-24T08:46:55,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42615 2024-11-24T08:46:55,785 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;469387a2cdb6:37057 2024-11-24T08:46:55,789 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/469387a2cdb6,37057,1732438014894 2024-11-24T08:46:55,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:46:55,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:46:55,799 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/469387a2cdb6,37057,1732438014894 2024-11-24T08:46:55,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:46:55,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:55,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:55,835 DEBUG 
[master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:46:55,836 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/469387a2cdb6,37057,1732438014894 from backup master directory 2024-11-24T08:46:55,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/469387a2cdb6,37057,1732438014894 2024-11-24T08:46:55,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:46:55,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:46:55,841 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:46:55,841 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=469387a2cdb6,37057,1732438014894 2024-11-24T08:46:55,844 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-24T08:46:55,846 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-24T08:46:55,919 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase.id] with ID: 4d97774d-e2c7-443a-b3b7-162f07648e72 2024-11-24T08:46:55,919 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/.tmp/hbase.id 2024-11-24T08:46:55,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:46:55,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:46:55,936 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/.tmp/hbase.id]:[hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase.id] 2024-11-24T08:46:55,994 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:56,000 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-24T08:46:56,023 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 21ms. 2024-11-24T08:46:56,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:46:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:46:56,069 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:46:56,071 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:46:56,078 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:46:56,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:46:56,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:46:56,144 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store 2024-11-24T08:46:56,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:46:56,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:46:56,173 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-24T08:46:56,177 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:46:56,178 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:46:56,178 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:46:56,179 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:46:56,181 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:46:56,181 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:46:56,181 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
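Note: the 'master:store' descriptor logged above is an ordinary TableDescriptor with four column families (info, proc, rs, state) and per-family tuning such as VERSIONS, BLOCKSIZE, BLOOMFILTER and IN_MEMORY. A descriptor with the same kind of settings is built against the public API roughly as below; this is a sketch only (the master constructs its descriptor internally), and only the 'info' family is spelled out.

    // Sketch: building a TableDescriptor with settings like the 'info' family above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Only builds the descriptor object; it does not create any table.
        TableDescriptor descriptor = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)                             // BLOCKSIZE => 8 KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                  // remaining families keep defaults here
                .build())
            .build();
        System.out.println(descriptor);
      }
    }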
2024-11-24T08:46:56,183 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438016178Disabling compacts and flushes for region at 1732438016178Disabling writes for close at 1732438016181 (+3 ms)Writing region close event to WAL at 1732438016181Closed at 1732438016181 2024-11-24T08:46:56,185 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/.initializing 2024-11-24T08:46:56,185 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/WALs/469387a2cdb6,37057,1732438014894 2024-11-24T08:46:56,210 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C37057%2C1732438014894, suffix=, logDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/WALs/469387a2cdb6,37057,1732438014894, archiveDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/oldWALs, maxLogs=10 2024-11-24T08:46:56,220 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C37057%2C1732438014894.1732438016215 2024-11-24T08:46:56,259 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/WALs/469387a2cdb6,37057,1732438014894/469387a2cdb6%2C37057%2C1732438014894.1732438016215 2024-11-24T08:46:56,268 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37427:37427),(127.0.0.1/127.0.0.1:45551:45551)] 2024-11-24T08:46:56,274 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:46:56,274 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:46:56,277 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,278 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:46:56,348 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:56,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:56,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:46:56,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:56,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:46:56,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:46:56,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:56,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:46:56,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:46:56,364 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:56,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:46:56,366 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,371 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,373 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,379 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,379 DEBUG [master/469387a2cdb6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,383 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:46:56,387 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:46:56,391 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:46:56,393 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727167, jitterRate=-0.07536040246486664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:46:56,401 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732438016291Initializing all the Stores at 1732438016293 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438016294 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438016295 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438016295Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438016295Cleaning up temporary data from old regions at 1732438016380 (+85 ms)Region opened successfully at 1732438016401 (+21 ms) 2024-11-24T08:46:56,403 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:46:56,442 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e0a9a9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:46:56,475 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:46:56,488 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:46:56,488 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:46:56,492 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:46:56,497 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 4 msec 2024-11-24T08:46:56,503 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-24T08:46:56,503 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:46:56,536 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:46:56,548 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:46:56,550 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:46:56,553 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:46:56,555 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:46:56,556 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:46:56,559 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:46:56,563 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:46:56,565 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:46:56,566 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:46:56,567 
DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:46:56,584 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:46:56,585 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:46:56,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:46:56,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:46:56,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,591 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=469387a2cdb6,37057,1732438014894, sessionid=0x10070e9949f0000, setting cluster-up flag (Was=false) 2024-11-24T08:46:56,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,606 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:46:56,607 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,37057,1732438014894 2024-11-24T08:46:56,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:56,616 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:46:56,618 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,37057,1732438014894 2024-11-24T08:46:56,624 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:46:56,666 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(746): ClusterId : 4d97774d-e2c7-443a-b3b7-162f07648e72 2024-11-24T08:46:56,669 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:46:56,674 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:46:56,674 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:46:56,677 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:46:56,678 DEBUG [RS:0;469387a2cdb6:42615 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d876cbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:46:56,693 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;469387a2cdb6:42615 2024-11-24T08:46:56,695 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:46:56,696 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:46:56,696 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T08:46:56,698 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,37057,1732438014894 with port=42615, startcode=1732438015629 2024-11-24T08:46:56,708 DEBUG [RS:0;469387a2cdb6:42615 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:46:56,717 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:46:56,728 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:46:56,736 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
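[Editor's note] Every entry in this section follows the same layout: timestamp with a comma before the milliseconds, level, bracketed thread name with an empty {} MDC block, abbreviated logger name with a source line number, then the message. A small parsing sketch is shown below for anyone sifting through this output programmatically; the regex and the class name are ad hoc for illustration and are not part of HBase or Log4j.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LogEntrySketch {
  // Matches entries such as:
  //   2024-11-24T08:46:56,743 DEBUG [master/... {}] balancer.RegionHDFSBlockLocationFinder(133): message
  private static final Pattern ENTRY = Pattern.compile(
      "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3})\\s+" // timestamp
          + "(TRACE|DEBUG|INFO|WARN|ERROR)\\s+"                // level
          + "\\[(.*?)\\]\\s+"                                  // thread name plus the empty {} MDC block
          + "([\\w.$]+)\\((\\d+)\\):\\s+"                      // abbreviated logger and source line
          + "(.*)");                                           // message

  public static void main(String[] args) {
    String sample = "2024-11-24T08:46:56,743 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] "
        + "balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null";
    Matcher m = ENTRY.matcher(sample);
    if (m.find()) {
      System.out.println("time=" + m.group(1) + " level=" + m.group(2)
          + " logger=" + m.group(4) + ":" + m.group(5));
      System.out.println("msg=" + m.group(6));
    }
  }
}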
2024-11-24T08:46:56,743 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 469387a2cdb6,37057,1732438014894 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:46:56,752 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:46:56,752 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:46:56,753 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:46:56,753 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:46:56,753 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/469387a2cdb6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:46:56,753 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:56,753 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:46:56,754 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:56,761 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:46:56,761 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:46:56,767 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732438046767 2024-11-24T08:46:56,768 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:56,769 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:46:56,769 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:46:56,770 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:46:56,775 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43999, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:46:56,777 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:46:56,777 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:46:56,777 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:46:56,778 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:46:56,784 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:56,790 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:46:56,791 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:46:56,792 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:46:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:46:56,788 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37057 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T08:46:56,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:46:56,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:46:56,797 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9 2024-11-24T08:46:56,801 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:46:56,802 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:46:56,809 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large 
file=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438016804,5,FailOnTimeoutGroup] 2024-11-24T08:46:56,810 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438016809,5,FailOnTimeoutGroup] 2024-11-24T08:46:56,810 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:56,810 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:46:56,812 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:56,812 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:56,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:46:56,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:46:56,826 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-24T08:46:56,826 WARN [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-24T08:46:56,928 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,37057,1732438014894 with port=42615, startcode=1732438015629 2024-11-24T08:46:56,930 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37057 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,42615,1732438015629 2024-11-24T08:46:56,933 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37057 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,42615,1732438015629 2024-11-24T08:46:56,943 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9 2024-11-24T08:46:56,943 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34511 2024-11-24T08:46:56,943 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:46:56,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:46:56,948 DEBUG [RS:0;469387a2cdb6:42615 {}] zookeeper.ZKUtil(111): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/469387a2cdb6,42615,1732438015629 2024-11-24T08:46:56,948 WARN [RS:0;469387a2cdb6:42615 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
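[Editor's note] The "Master is not running yet" / "reportForDuty failed; sleeping 100 ms and then retrying" exchange above, followed by the successful re-report and registration, is a plain sleep-and-retry handshake: the region server keeps asking the master to register it until the master's RPC services are up. The sketch below shows only the shape of that pattern; the registerWithMaster callable, the fixed delay, and the attempt cap are invented for the example and are not the HRegionServer code path.

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public class ReportForDutySketch {
  // Keeps invoking the supplied registration attempt until it succeeds or the
  // attempt budget is exhausted, sleeping a fixed delay between failures.
  static boolean retryUntilRegistered(Callable<Boolean> registerWithMaster,
                                      long sleepMillis, int maxAttempts) throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        if (registerWithMaster.call()) {
          return true;                 // master accepted the registration
        }
      } catch (Exception e) {
        // e.g. ServerNotRunningYetException while the master is still starting up
      }
      TimeUnit.MILLISECONDS.sleep(sleepMillis);
    }
    return false;
  }

  public static void main(String[] args) throws InterruptedException {
    // Toy stand-in: "succeeds" on the third call, like a master that finishes starting up.
    int[] calls = {0};
    boolean ok = retryUntilRegistered(() -> ++calls[0] >= 3, 100L, 10);
    System.out.println("registered=" + ok + " after " + calls[0] + " attempts");
  }
}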
2024-11-24T08:46:56,948 INFO [RS:0;469387a2cdb6:42615 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:46:56,949 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629 2024-11-24T08:46:56,951 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,42615,1732438015629] 2024-11-24T08:46:56,973 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:46:56,992 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:46:56,998 INFO [RS:0;469387a2cdb6:42615 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:46:56,998 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,000 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:46:57,006 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:46:57,009 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,009 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,009 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:46:57,010 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,011 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,011 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:46:57,011 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:46:57,011 DEBUG [RS:0;469387a2cdb6:42615 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:46:57,012 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,012 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,012 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,012 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,012 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,012 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,42615,1732438015629-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:46:57,032 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:46:57,035 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,42615,1732438015629-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,035 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,036 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.Replication(171): 469387a2cdb6,42615,1732438015629 started 2024-11-24T08:46:57,058 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
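[Editor's note] The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries above are HBase's internal ChoreService registering periodic housekeeping tasks (compaction checks, memstore flush checks, executor status, cleaners). Outside HBase the same idea is a fixed-rate scheduled task; the sketch below shows the plain JDK analogue, with the task body and period made up for illustration rather than taken from HBase.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // Analogue of a chore with period=1000, unit=MILLISECONDS:
    // run a named task every second after an initial one-second delay.
    scheduler.scheduleAtFixedRate(
        () -> System.out.println("CompactionChecker-style task ran at " + System.currentTimeMillis()),
        1_000L, 1_000L, TimeUnit.MILLISECONDS);

    // Let it fire a few times, then shut the scheduler down cleanly.
    TimeUnit.SECONDS.sleep(5);
    scheduler.shutdown();
    scheduler.awaitTermination(2, TimeUnit.SECONDS);
  }
}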
2024-11-24T08:46:57,058 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,42615,1732438015629, RpcServer on 469387a2cdb6/172.17.0.2:42615, sessionid=0x10070e9949f0001 2024-11-24T08:46:57,059 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:46:57,060 DEBUG [RS:0;469387a2cdb6:42615 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,42615,1732438015629 2024-11-24T08:46:57,060 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,42615,1732438015629' 2024-11-24T08:46:57,060 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:46:57,062 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:46:57,063 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:46:57,063 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:46:57,063 DEBUG [RS:0;469387a2cdb6:42615 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 469387a2cdb6,42615,1732438015629 2024-11-24T08:46:57,063 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,42615,1732438015629' 2024-11-24T08:46:57,063 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:46:57,064 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:46:57,065 DEBUG [RS:0;469387a2cdb6:42615 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:46:57,065 INFO [RS:0;469387a2cdb6:42615 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:46:57,065 INFO [RS:0;469387a2cdb6:42615 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
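[Editor's note] At this point the single region server is registered and serving under session 0x10070e9949f0001, and the cluster is reachable through the ZooKeeper quorum 127.0.0.1:62562 that appears throughout the log. The sketch below shows how a client could connect to such a cluster and scan the hbase:meta catalog; splitting the quorum address into host and clientPort is an assumption for the example, and in the real test the configuration object comes from the mini-cluster utility rather than being set by hand.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum address taken from the log above; host/port split is an assumption.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62562");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table meta = connection.getTable(TableName.valueOf("hbase:meta"));
         ResultScanner scanner = meta.getScanner(new Scan())) {
      for (Result row : scanner) {
        System.out.println(row);   // one row per region known to the catalog
      }
    }
  }
}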
2024-11-24T08:46:57,173 INFO [RS:0;469387a2cdb6:42615 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C42615%2C1732438015629, suffix=, logDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629, archiveDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs, maxLogs=32 2024-11-24T08:46:57,177 INFO [RS:0;469387a2cdb6:42615 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438017176 2024-11-24T08:46:57,186 INFO [RS:0;469387a2cdb6:42615 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438017176 2024-11-24T08:46:57,191 DEBUG [RS:0;469387a2cdb6:42615 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45551:45551),(127.0.0.1/127.0.0.1:37427:37427)] 2024-11-24T08:46:57,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:46:57,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:46:57,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:46:57,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:46:57,237 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:46:57,237 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:46:57,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:46:57,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:46:57,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:46:57,247 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:46:57,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740 2024-11-24T08:46:57,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740 2024-11-24T08:46:57,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:46:57,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:46:57,255 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:46:57,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:46:57,263 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:46:57,265 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795882, jitterRate=0.01201649010181427}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:46:57,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732438017226Initializing all the Stores at 1732438017228 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438017228Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438017229 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438017229Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438017229Cleaning up temporary data from old regions at 1732438017254 (+25 ms)Region opened successfully at 1732438017269 (+15 ms) 2024-11-24T08:46:57,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:46:57,269 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:46:57,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:46:57,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:46:57,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:46:57,271 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:46:57,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438017269Disabling compacts and flushes for region at 1732438017269Disabling writes for close at 1732438017269Writing region close event to WAL at 1732438017270 (+1 ms)Closed at 1732438017271 (+1 ms) 2024-11-24T08:46:57,275 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:46:57,275 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:46:57,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:46:57,294 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:46:57,297 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:46:57,450 DEBUG [469387a2cdb6:37057 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:46:57,464 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=469387a2cdb6,42615,1732438015629 2024-11-24T08:46:57,470 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,42615,1732438015629, state=OPENING 2024-11-24T08:46:57,475 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:46:57,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-24T08:46:57,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:46:57,477 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:46:57,477 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:46:57,479 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:46:57,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,42615,1732438015629}] 2024-11-24T08:46:57,657 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:46:57,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36049, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:46:57,671 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:46:57,672 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:46:57,676 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C42615%2C1732438015629.meta, suffix=.meta, logDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629, archiveDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs, maxLogs=32 2024-11-24T08:46:57,679 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.meta.1732438017679.meta 2024-11-24T08:46:57,687 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.meta.1732438017679.meta 2024-11-24T08:46:57,691 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45551:45551),(127.0.0.1/127.0.0.1:37427:37427)] 2024-11-24T08:46:57,692 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:46:57,694 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:46:57,696 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:46:57,701 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T08:46:57,705 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:46:57,705 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:46:57,705 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:46:57,705 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:46:57,708 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:46:57,709 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:46:57,710 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:46:57,712 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:46:57,712 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:46:57,715 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:46:57,715 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:46:57,717 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:46:57,717 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:57,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:46:57,718 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:46:57,720 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740 2024-11-24T08:46:57,722 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740 2024-11-24T08:46:57,725 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:46:57,725 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:46:57,726 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
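The FlushLargeStoresPolicy entry above falls back to region.getMemStoreFlushHeapSize divided by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, and logs the quotient as "(16.0 M)". With the four hbase:meta families opened above (info, ns, rep_barrier, table), that matches the FlushLargeStoresPolicy{flushSizeLowerBound=16777216} printed in the earlier "Opened 1588230740" entry. A minimal sketch of that arithmetic; the 64 MiB flush heap size is an assumption inferred from the logged quotient, since the getter itself is internal to HRegion and not shown in this log:

# Sketch of the per-family lower-bound fallback described in the
# FlushLargeStoresPolicy log entry above. The 64 MiB region memstore
# flush heap size is an assumption inferred from the logged "(16.0 M)".
def per_family_lower_bound(flush_heap_size_bytes: int, num_families: int) -> int:
    return flush_heap_size_bytes // num_families

META_FAMILIES = ["info", "ns", "rep_barrier", "table"]  # families opened above
assumed_flush_heap = 64 * 1024 * 1024                    # assumption: 64 MiB

lower_bound = per_family_lower_bound(assumed_flush_heap, len(META_FAMILIES))
print(lower_bound)                  # 16777216, the logged flushSizeLowerBound
print(lower_bound / (1024 * 1024))  # 16.0, the logged "(16.0 M)"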
2024-11-24T08:46:57,728 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:46:57,730 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794306, jitterRate=0.010012790560722351}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:46:57,730 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:46:57,731 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732438017706Writing region info on filesystem at 1732438017706Initializing all the Stores at 1732438017708 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438017708Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438017708Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438017708Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438017708Cleaning up temporary data from old regions at 1732438017725 (+17 ms)Running coprocessor post-open hooks at 1732438017730 (+5 ms)Region opened successfully at 1732438017731 (+1 ms) 2024-11-24T08:46:57,737 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732438017649 2024-11-24T08:46:57,748 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:46:57,749 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:46:57,751 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=469387a2cdb6,42615,1732438015629 2024-11-24T08:46:57,753 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,42615,1732438015629, state=OPEN 2024-11-24T08:46:57,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:46:57,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:46:57,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:46:57,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:46:57,762 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=469387a2cdb6,42615,1732438015629 2024-11-24T08:46:57,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:46:57,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,42615,1732438015629 in 281 msec 2024-11-24T08:46:57,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:46:57,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 487 msec 2024-11-24T08:46:57,777 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:46:57,778 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:46:57,798 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:46:57,799 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,42615,1732438015629, seqNum=-1] 2024-11-24T08:46:57,816 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:46:57,819 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35761, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:46:57,841 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1760 sec 2024-11-24T08:46:57,841 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732438017841, completionTime=-1 2024-11-24T08:46:57,843 INFO 
[master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:46:57,844 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:46:57,866 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:46:57,866 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732438077866 2024-11-24T08:46:57,866 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732438137866 2024-11-24T08:46:57,866 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 22 msec 2024-11-24T08:46:57,869 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37057,1732438014894-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,869 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37057,1732438014894-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,869 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37057,1732438014894-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,871 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-469387a2cdb6:37057, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,871 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,872 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,878 DEBUG [master/469387a2cdb6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:46:57,897 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.056sec 2024-11-24T08:46:57,898 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:46:57,900 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:46:57,900 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:46:57,901 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-24T08:46:57,901 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:46:57,902 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37057,1732438014894-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:46:57,902 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37057,1732438014894-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:46:57,910 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:46:57,911 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:46:57,911 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37057,1732438014894-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:46:57,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5901b87c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:46:57,978 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T08:46:57,978 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T08:46:57,980 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 469387a2cdb6,37057,-1 for getting cluster id 2024-11-24T08:46:57,983 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:46:57,990 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4d97774d-e2c7-443a-b3b7-162f07648e72' 2024-11-24T08:46:57,992 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:46:57,992 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4d97774d-e2c7-443a-b3b7-162f07648e72" 2024-11-24T08:46:57,994 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18e02270, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:46:57,994 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [469387a2cdb6,37057,-1] 2024-11-24T08:46:57,997 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:46:57,998 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:46:58,000 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
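The ChoreService entries above list each scheduled chore together with its period and unit. When pulling those values out of a capture like this one, a small regex is enough; the pattern below is written against the exact "ScheduledChore name=..., period=..., unit=..." wording visible in these lines and is only an illustration, not an HBase API:

import re

# Matches the "Chore ScheduledChore name=<name>, period=<n>, unit=<unit>" wording
# used by the hbase.ChoreService(168) entries above. The name group is non-greedy
# because chore names themselves contain commas (host,port,startcode prefixes).
CHORE_RE = re.compile(r"ScheduledChore name=(.+?), period=(\d+), unit=(\w+)")

def chores(log_text: str):
    """Yield (name, period, unit) for every scheduled-chore entry in the text."""
    for name, period, unit in CHORE_RE.findall(log_text):
        yield name, int(period), unit

sample = ("Chore ScheduledChore name=RollingUpgradeChore, period=10, "
          "unit=SECONDS is enabled.")
print(list(chores(sample)))  # [('RollingUpgradeChore', 10, 'SECONDS')]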
2024-11-24T08:46:58,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@526567d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:46:58,004 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:46:58,030 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,42615,1732438015629, seqNum=-1] 2024-11-24T08:46:58,031 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:46:58,034 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:46:58,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=469387a2cdb6,37057,1732438014894 2024-11-24T08:46:58,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:46:58,065 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:46:58,069 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:46:58,074 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 469387a2cdb6,37057,1732438014894 2024-11-24T08:46:58,077 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@e3ff8cb 2024-11-24T08:46:58,078 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:46:58,081 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60382, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:46:58,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37057 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:46:58,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37057 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
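The MAX_FILESIZE warning just above confirms this run uses hbase.hregion.max.filesize=786432 bytes, and the earlier "Opened 1588230740" entries report ConstantSizeRegionSplitPolicy desiredMaxFileSize values of 795882 and 794306 alongside their jitterRate values. Those numbers are consistent with the base size plus a jitter fraction of itself, truncated to a long; the check below is a sketch that reproduces the logged figures, not a claim about the exact HBase split-policy code:

# Sketch: the logged desiredMaxFileSize values are consistent with
# base + int(base * jitterRate), using the 786432-byte max file size
# reported in the MAX_FILESIZE warning above.
BASE = 786432

def desired_max_file_size(base: int, jitter_rate: float) -> int:
    return base + int(base * jitter_rate)

observed = [
    (0.01201649010181427, 795882),   # "Opened 1588230740" entry from PEWorker-1
    (0.010012790560722351, 794306),  # "Opened 1588230740" entry from RS_OPEN_META
]
for jitter, logged in observed:
    assert desired_max_file_size(BASE, jitter) == logged
print("jitter arithmetic matches the logged desiredMaxFileSize values")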
2024-11-24T08:46:58,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37057 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:46:58,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37057 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:46:58,096 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:46:58,098 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37057 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-24T08:46:58,098 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:58,100 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:46:58,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:46:58,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741835_1011 (size=389) 2024-11-24T08:46:58,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741835_1011 (size=389) 2024-11-24T08:46:58,148 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5ca6d68759e1b6b2b436e1562757b2e1, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9 2024-11-24T08:46:58,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741836_1012 (size=72) 2024-11-24T08:46:58,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741836_1012 (size=72) 2024-11-24T08:46:58,159 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:46:58,159 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5ca6d68759e1b6b2b436e1562757b2e1, disabling compactions & flushes 2024-11-24T08:46:58,159 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:46:58,159 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:46:58,159 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. after waiting 0 ms 2024-11-24T08:46:58,159 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:46:58,159 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:46:58,159 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5ca6d68759e1b6b2b436e1562757b2e1: Waiting for close lock at 1732438018159Disabling compacts and flushes for region at 1732438018159Disabling writes for close at 1732438018159Writing region close event to WAL at 1732438018159Closed at 1732438018159 2024-11-24T08:46:58,162 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:46:58,166 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732438018162"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438018162"}]},"ts":"1732438018162"} 2024-11-24T08:46:58,171 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
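The hbase.MetaTableAccessor(964) entry above prints the meta Put as a JSON document. When reading captures like this, that payload parses with a plain JSON parser; the sketch below only re-reads what the log already printed, copied verbatim from the entry above:

import json

# The Put payload logged by hbase.MetaTableAccessor(964) above, copied verbatim.
put_json = (
    '{"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,'
    '1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.",'
    '"families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732438018162"},'
    '{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438018162"}]},"ts":"1732438018162"}'
)

put = json.loads(put_json)
print(put["row"])  # the meta row key written for the new region
for family, cells in put["families"].items():
    for cell in cells:
        print(family, cell["qualifier"], cell["vlen"], cell["timestamp"])
# info regioninfo 71 1732438018162
# info state 6 1732438018162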
2024-11-24T08:46:58,174 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:46:58,176 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438018174"}]},"ts":"1732438018174"} 2024-11-24T08:46:58,180 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-24T08:46:58,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5ca6d68759e1b6b2b436e1562757b2e1, ASSIGN}] 2024-11-24T08:46:58,184 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5ca6d68759e1b6b2b436e1562757b2e1, ASSIGN 2024-11-24T08:46:58,186 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5ca6d68759e1b6b2b436e1562757b2e1, ASSIGN; state=OFFLINE, location=469387a2cdb6,42615,1732438015629; forceNewPlan=false, retain=false 2024-11-24T08:46:58,340 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5ca6d68759e1b6b2b436e1562757b2e1, regionState=OPENING, regionLocation=469387a2cdb6,42615,1732438015629 2024-11-24T08:46:58,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5ca6d68759e1b6b2b436e1562757b2e1, ASSIGN because future has completed 2024-11-24T08:46:58,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ca6d68759e1b6b2b436e1562757b2e1, server=469387a2cdb6,42615,1732438015629}] 2024-11-24T08:46:58,516 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 
2024-11-24T08:46:58,516 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5ca6d68759e1b6b2b436e1562757b2e1, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:46:58,517 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,517 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:46:58,517 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,517 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,520 INFO [StoreOpener-5ca6d68759e1b6b2b436e1562757b2e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,522 INFO [StoreOpener-5ca6d68759e1b6b2b436e1562757b2e1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ca6d68759e1b6b2b436e1562757b2e1 columnFamilyName info 2024-11-24T08:46:58,522 DEBUG [StoreOpener-5ca6d68759e1b6b2b436e1562757b2e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:46:58,523 INFO [StoreOpener-5ca6d68759e1b6b2b436e1562757b2e1-1 {}] regionserver.HStore(327): Store=5ca6d68759e1b6b2b436e1562757b2e1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:46:58,523 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,525 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,525 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,526 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,526 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,528 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,531 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:46:58,532 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5ca6d68759e1b6b2b436e1562757b2e1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807882, jitterRate=0.027276143431663513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:46:58,532 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:46:58,533 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5ca6d68759e1b6b2b436e1562757b2e1: Running coprocessor pre-open hook at 1732438018517Writing region info on filesystem at 1732438018517Initializing all the Stores at 1732438018519 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438018519Cleaning up temporary data from old regions at 1732438018526 (+7 ms)Running coprocessor post-open hooks at 1732438018532 (+6 ms)Region opened successfully at 1732438018533 (+1 ms) 2024-11-24T08:46:58,535 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1., pid=6, masterSystemTime=1732438018505 2024-11-24T08:46:58,539 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:46:58,539 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:46:58,540 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5ca6d68759e1b6b2b436e1562757b2e1, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,42615,1732438015629 2024-11-24T08:46:58,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ca6d68759e1b6b2b436e1562757b2e1, server=469387a2cdb6,42615,1732438015629 because future has completed 2024-11-24T08:46:58,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:46:58,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5ca6d68759e1b6b2b436e1562757b2e1, server=469387a2cdb6,42615,1732438015629 in 197 msec 2024-11-24T08:46:58,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:46:58,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5ca6d68759e1b6b2b436e1562757b2e1, ASSIGN in 368 msec 2024-11-24T08:46:58,556 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:46:58,557 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438018557"}]},"ts":"1732438018557"} 2024-11-24T08:46:58,560 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-24T08:46:58,562 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:46:58,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 473 msec 2024-11-24T08:47:03,212 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T08:47:03,264 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:47:03,266 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-24T08:47:05,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:47:05,319 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:47:05,321 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:47:05,321 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T08:47:05,322 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:47:05,322 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:47:05,323 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T08:47:05,323 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T08:47:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:47:08,128 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-24T08:47:08,131 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-24T08:47:08,137 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:47:08,137 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 
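The pid=4 CreateTableProcedure reported as completed above walked through its states in the order shown across the preceding entries, spawning the pid=5 ASSIGN and pid=6 OpenRegionProcedure children along the way and finishing them child-first. Purely as a reading aid, the observed sequence and parent/child relationship can be written down as follows (taken from the log itself, not from the HBase procedure definitions):

# State sequence observed for pid=4 (CreateTableProcedure) in the entries above.
CREATE_TABLE_STATES = [
    "CREATE_TABLE_PRE_OPERATION",
    "CREATE_TABLE_WRITE_FS_LAYOUT",
    "CREATE_TABLE_ADD_TO_META",
    "CREATE_TABLE_ASSIGN_REGIONS",
    "CREATE_TABLE_UPDATE_DESC_CACHE",
    "CREATE_TABLE_POST_OPERATION",
]

# Parent -> children pids observed: 4 spawns 5 (ASSIGN), 5 spawns 6 (OpenRegion).
PROCEDURE_TREE = {4: [5], 5: [6], 6: []}

def leaf_first(pid, tree=PROCEDURE_TREE):
    """Yield pids in the order they finish (children before parents), as in the log."""
    for child in tree.get(pid, []):
        yield from leaf_first(child, tree)
    yield pid

print(CREATE_TABLE_STATES)
print(list(leaf_first(4)))  # [6, 5, 4], matching the "Finished pid=6/5/4" order above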
2024-11-24T08:47:08,138 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438028138 2024-11-24T08:47:08,146 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:08,146 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:08,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:08,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:08,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:08,147 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438017176 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438028138 2024-11-24T08:47:08,149 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37427:37427),(127.0.0.1/127.0.0.1:45551:45551)] 2024-11-24T08:47:08,149 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438017176 is not closed yet, will try archiving it next time 2024-11-24T08:47:08,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741833_1009 (size=451) 2024-11-24T08:47:08,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741833_1009 (size=451) 2024-11-24T08:47:08,152 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438017176 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs/469387a2cdb6%2C42615%2C1732438015629.1732438017176 2024-11-24T08:47:08,159 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1., hostname=469387a2cdb6,42615,1732438015629, seqNum=2] 2024-11-24T08:47:20,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42615 {}] regionserver.HRegion(8855): Flush requested on 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:47:20,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5ca6d68759e1b6b2b436e1562757b2e1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:47:20,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/095b7edac9a64209b1e465d4ca0be459 is 1080, key is row0001/info:/1732438028161/Put/seqid=0 2024-11-24T08:47:20,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741838_1014 (size=12509) 2024-11-24T08:47:20,278 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741838_1014 (size=12509) 2024-11-24T08:47:20,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/095b7edac9a64209b1e465d4ca0be459 2024-11-24T08:47:20,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/095b7edac9a64209b1e465d4ca0be459 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459 2024-11-24T08:47:20,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T08:47:20,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5ca6d68759e1b6b2b436e1562757b2e1 in 143ms, sequenceid=11, compaction requested=false 2024-11-24T08:47:20,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5ca6d68759e1b6b2b436e1562757b2e1: 2024-11-24T08:47:23,933 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
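The flush above persists seven roughly 1 KB Puts (row0001 onward, biggest cell 1080 bytes) into a single HFile. The following is a minimal sketch of the kind of client write pattern that produces such a flush; the row format, value size and table handle are assumptions for illustration, not the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteSmallBatch {
      // Writes n rows of roughly 1 KB each into family "info", similar in shape to the
      // row0001..row0007 cells visible in the flush above (sizes are illustrative).
      static void writeBatch(Connection conn, int startRow, int n) throws Exception {
        byte[] family = Bytes.toBytes("info");
        byte[] value = new byte[1024]; // ~1 KB payload per cell
        try (Table table = conn.getTable(
            TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          for (int i = 0; i < n; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", startRow + i)));
            put.addColumn(family, Bytes.toBytes(""), value);
            table.put(put); // each put is appended to the WAL before it is acknowledged
          }
        }
      }
    }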
2024-11-24T08:47:28,220 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438048219 2024-11-24T08:47:28,434 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:28,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:28,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:28,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:28,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:28,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:28,436 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438028138 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438048219 2024-11-24T08:47:28,436 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37427:37427),(127.0.0.1/127.0.0.1:45551:45551)] 2024-11-24T08:47:28,436 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438028138 is not closed yet, will try archiving it next time 2024-11-24T08:47:28,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741837_1013 (size=12399) 2024-11-24T08:47:28,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741837_1013 (size=12399) 2024-11-24T08:47:28,642 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:30,849 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:33,056 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:35,262 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:35,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42615 {}] regionserver.HRegion(8855): Flush requested on 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:47:35,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5ca6d68759e1b6b2b436e1562757b2e1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:47:35,465 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:35,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/f44aea4a834648dd951be7648c685455 is 1080, key is row0008/info:/1732438042200/Put/seqid=0 2024-11-24T08:47:35,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741840_1016 (size=12509) 2024-11-24T08:47:35,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741840_1016 (size=12509) 2024-11-24T08:47:35,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/f44aea4a834648dd951be7648c685455 2024-11-24T08:47:35,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/f44aea4a834648dd951be7648c685455 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f44aea4a834648dd951be7648c685455 2024-11-24T08:47:35,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f44aea4a834648dd951be7648c685455, entries=7, sequenceid=21, filesize=12.2 K 2024-11-24T08:47:36,114 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:36,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5ca6d68759e1b6b2b436e1562757b2e1 in 
852ms, sequenceid=21, compaction requested=false 2024-11-24T08:47:36,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5ca6d68759e1b6b2b436e1562757b2e1: 2024-11-24T08:47:36,115 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-24T08:47:36,116 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:47:36,117 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459 because midkey is the same as first or last row 2024-11-24T08:47:37,470 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:37,965 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T08:47:37,965 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T08:47:39,679 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:39,686 WARN [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:39,688 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C42615%2C1732438015629:(num 1732438048219) roll requested 2024-11-24T08:47:39,688 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438059688 2024-11-24T08:47:39,902 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:39,902 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:39,902 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:39,902 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:39,903 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:39,903 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
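The WARN above shows the WAL requesting a roll after the number of slow syncs (count=8) passed a threshold of 5; later entries show a second trigger, where a single sync exceeding 5000 ms forces a roll on its own. The sketch below is an illustrative reconstruction of that decision logic, not HBase's actual FSHLog code; the field names, example values and the reset-on-roll behaviour are assumptions.

    import java.util.concurrent.atomic.AtomicInteger;

    /** Illustrative slow-sync accounting: roll when too many slow syncs, or one very slow sync. */
    class SlowSyncRollPolicy {
      private final long slowSyncMs;             // syncs slower than this count as "slow" (the ~200 ms syncs above qualified)
      private final long rollOnSingleSyncMs;     // one sync slower than this forces a roll (e.g. 5000 ms, as in "threshold=5000 ms" above)
      private final int slowSyncCountThreshold;  // e.g. 5, matching "threshold=5" in the log
      private final AtomicInteger slowSyncCount = new AtomicInteger();

      SlowSyncRollPolicy(long slowSyncMs, long rollOnSingleSyncMs, int slowSyncCountThreshold) {
        this.slowSyncMs = slowSyncMs;
        this.rollOnSingleSyncMs = rollOnSingleSyncMs;
        this.slowSyncCountThreshold = slowSyncCountThreshold;
      }

      /** Returns true if a log roll should be requested after a sync that took syncCostMs. */
      boolean onSyncCompleted(long syncCostMs) {
        if (syncCostMs >= rollOnSingleSyncMs) {
          return true; // the "Slow sync cost: 5012 ms" case
        }
        if (syncCostMs >= slowSyncMs
            && slowSyncCount.incrementAndGet() >= slowSyncCountThreshold) {
          return true; // the "count=8, threshold=5" case
        }
        return false;
      }

      /** Called once the WAL has actually rolled, so counting starts over on the new file. */
      void onLogRolled() {
        slowSyncCount.set(0);
      }
    }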
2024-11-24T08:47:39,903 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438048219 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438059688 2024-11-24T08:47:39,904 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37427:37427),(127.0.0.1/127.0.0.1:45551:45551)] 2024-11-24T08:47:39,904 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438048219 is not closed yet, will try archiving it next time 2024-11-24T08:47:39,904 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438028138 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs/469387a2cdb6%2C42615%2C1732438015629.1732438028138 2024-11-24T08:47:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741839_1015 (size=7739) 2024-11-24T08:47:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741839_1015 (size=7739) 2024-11-24T08:47:41,884 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:43,517 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5ca6d68759e1b6b2b436e1562757b2e1, had cached 0 bytes from a total of 25018 2024-11-24T08:47:44,089 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:46,296 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:48,502 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], 
DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:50,506 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:47:50,507 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438070507 2024-11-24T08:47:53,933 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:47:55,524 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:55,527 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:47:55,527 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C42615%2C1732438015629:(num 1732438070507) roll requested 2024-11-24T08:47:55,527 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:55,527 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:55,527 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:55,528 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:55,528 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:47:55,528 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438059688 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438070507 2024-11-24T08:47:55,531 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37427:37427),(127.0.0.1/127.0.0.1:45551:45551)] 2024-11-24T08:47:55,531 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438059688 is not closed yet, will try archiving it next time 2024-11-24T08:47:55,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741841_1017 (size=4753) 2024-11-24T08:47:55,532 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438075531 2024-11-24T08:47:55,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741841_1017 (size=4753) 2024-11-24T08:48:00,537 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5003 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:48:00,537 WARN [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5003 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:48:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42615 {}] regionserver.HRegion(8855): Flush requested on 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:48:00,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5ca6d68759e1b6b2b436e1562757b2e1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:48:00,547 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:48:00,547 WARN [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:48:02,540 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:48:05,544 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:48:05,545 WARN [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK], DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK]] 2024-11-24T08:48:05,545 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:05,546 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:05,546 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:05,547 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:05,547 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:05,548 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438070507 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438075531 2024-11-24T08:48:05,551 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45551:45551),(127.0.0.1/127.0.0.1:37427:37427)] 2024-11-24T08:48:05,551 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438070507 is not closed yet, will try archiving it next time 2024-11-24T08:48:05,551 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C42615%2C1732438015629:(num 1732438075531) roll requested 2024-11-24T08:48:05,552 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438085551 2024-11-24T08:48:05,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741842_1018 (size=1569) 2024-11-24T08:48:05,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741842_1018 (size=1569) 2024-11-24T08:48:05,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/3bb81156027a441e8f93ccf593b1d950 is 1080, key is row0015/info:/1732438057267/Put/seqid=0 2024-11-24T08:48:05,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741844_1020 (size=12509) 2024-11-24T08:48:05,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741844_1020 (size=12509) 2024-11-24T08:48:05,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/3bb81156027a441e8f93ccf593b1d950 2024-11-24T08:48:05,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/3bb81156027a441e8f93ccf593b1d950 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/3bb81156027a441e8f93ccf593b1d950 2024-11-24T08:48:05,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/3bb81156027a441e8f93ccf593b1d950, entries=7, sequenceid=31, filesize=12.2 K 2024-11-24T08:48:10,569 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK], DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK]] 2024-11-24T08:48:10,569 WARN [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK], DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK]] 2024-11-24T08:48:10,586 INFO [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK], DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK]] 2024-11-24T08:48:10,586 WARN [FSHLog-0-hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9-prefix:469387a2cdb6,42615,1732438015629 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39953,DS-4fd294c2-0ca7-47a9-98a7-b6d2726e2b93,DISK], DatanodeInfoWithStorage[127.0.0.1:38515,DS-32ed2431-95fd-4963-8db7-50cd1b935c5b,DISK]] 2024-11-24T08:48:10,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5ca6d68759e1b6b2b436e1562757b2e1 in 10048ms, sequenceid=31, compaction requested=true 2024-11-24T08:48:10,586 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5ca6d68759e1b6b2b436e1562757b2e1: 2024-11-24T08:48:10,586 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,586 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-24T08:48:10,586 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:10,586 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,586 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459 because midkey is the same as first or last row 2024-11-24T08:48:10,586 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,586 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,587 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438075531 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438085551 2024-11-24T08:48:10,588 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45551:45551),(127.0.0.1/127.0.0.1:37427:37427)] 2024-11-24T08:48:10,588 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438075531 is not closed yet, will try archiving it next time 2024-11-24T08:48:10,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5ca6d68759e1b6b2b436e1562757b2e1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:48:10,588 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438048219 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs/469387a2cdb6%2C42615%2C1732438015629.1732438048219 2024-11-24T08:48:10,588 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C42615%2C1732438015629:(num 1732438090588) roll requested 2024-11-24T08:48:10,588 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438090588 2024-11-24T08:48:10,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741843_1019 (size=438) 2024-11-24T08:48:10,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:48:10,590 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438059688 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs/469387a2cdb6%2C42615%2C1732438015629.1732438059688 2024-11-24T08:48:10,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741843_1019 (size=438) 2024-11-24T08:48:10,592 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:48:10,592 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438070507 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs/469387a2cdb6%2C42615%2C1732438015629.1732438070507 2024-11-24T08:48:10,593 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438075531 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs/469387a2cdb6%2C42615%2C1732438015629.1732438075531 2024-11-24T08:48:10,594 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-24T08:48:10,596 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.HStore(1541): 5ca6d68759e1b6b2b436e1562757b2e1/info is initiating minor compaction (all files) 2024-11-24T08:48:10,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,597 INFO [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5ca6d68759e1b6b2b436e1562757b2e1/info in TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:48:10,597 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,598 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438085551 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438090588 2024-11-24T08:48:10,598 INFO [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459, hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f44aea4a834648dd951be7648c685455, hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/3bb81156027a441e8f93ccf593b1d950] into tmpdir=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp, totalSize=36.6 K 2024-11-24T08:48:10,599 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37427:37427),(127.0.0.1/127.0.0.1:45551:45551)] 2024-11-24T08:48:10,599 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438085551 is not closed yet, will try archiving it next time 2024-11-24T08:48:10,599 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] compactions.Compactor(225): Compacting 095b7edac9a64209b1e465d4ca0be459, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732438028161 2024-11-24T08:48:10,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741845_1021 (size=93) 2024-11-24T08:48:10,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741845_1021 (size=93) 2024-11-24T08:48:10,600 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42615%2C1732438015629.1732438090600 2024-11-24T08:48:10,601 DEBUG 
[RS:0;469387a2cdb6:42615-shortCompactions-0 {}] compactions.Compactor(225): Compacting f44aea4a834648dd951be7648c685455, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732438042200 2024-11-24T08:48:10,601 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438085551 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs/469387a2cdb6%2C42615%2C1732438015629.1732438085551 2024-11-24T08:48:10,602 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3bb81156027a441e8f93ccf593b1d950, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732438057267 2024-11-24T08:48:10,611 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,611 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,611 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,611 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,612 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:10,612 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438090588 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438090600 2024-11-24T08:48:10,613 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37427:37427),(127.0.0.1/127.0.0.1:45551:45551)] 2024-11-24T08:48:10,613 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/WALs/469387a2cdb6,42615,1732438015629/469387a2cdb6%2C42615%2C1732438015629.1732438090588 is not closed yet, will try archiving it next time 2024-11-24T08:48:10,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741846_1022 (size=1258) 2024-11-24T08:48:10,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741846_1022 (size=1258) 2024-11-24T08:48:10,632 INFO [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5ca6d68759e1b6b2b436e1562757b2e1#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:48:10,634 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/d20251e96e17451c8f2aff1af52e5228 is 1080, key is row0001/info:/1732438028161/Put/seqid=0 2024-11-24T08:48:10,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741848_1024 (size=27710) 2024-11-24T08:48:10,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741848_1024 (size=27710) 2024-11-24T08:48:10,651 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/d20251e96e17451c8f2aff1af52e5228 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/d20251e96e17451c8f2aff1af52e5228 2024-11-24T08:48:10,667 INFO [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5ca6d68759e1b6b2b436e1562757b2e1/info of 5ca6d68759e1b6b2b436e1562757b2e1 into d20251e96e17451c8f2aff1af52e5228(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:48:10,668 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5ca6d68759e1b6b2b436e1562757b2e1: 2024-11-24T08:48:10,670 INFO [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1., storeName=5ca6d68759e1b6b2b436e1562757b2e1/info, priority=13, startTime=1732438090588; duration=0sec 2024-11-24T08:48:10,670 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T08:48:10,670 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:10,670 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/d20251e96e17451c8f2aff1af52e5228 because midkey is the same as first or last row 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/d20251e96e17451c8f2aff1af52e5228 because midkey is the same as first or last row 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/d20251e96e17451c8f2aff1af52e5228 because midkey is the same as first or last row 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:48:10,671 DEBUG [RS:0;469387a2cdb6:42615-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5ca6d68759e1b6b2b436e1562757b2e1:info 2024-11-24T08:48:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42615 {}] regionserver.HRegion(8855): Flush requested on 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:48:22,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5ca6d68759e1b6b2b436e1562757b2e1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:48:22,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/f793b3cee86f4878918ffe0187bf1903 is 1080, key is row0022/info:/1732438090601/Put/seqid=0 2024-11-24T08:48:22,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741849_1025 (size=12509) 2024-11-24T08:48:22,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741849_1025 (size=12509) 2024-11-24T08:48:22,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/f793b3cee86f4878918ffe0187bf1903 2024-11-24T08:48:22,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/f793b3cee86f4878918ffe0187bf1903 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f793b3cee86f4878918ffe0187bf1903 2024-11-24T08:48:22,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f793b3cee86f4878918ffe0187bf1903, entries=7, sequenceid=42, filesize=12.2 K 2024-11-24T08:48:22,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5ca6d68759e1b6b2b436e1562757b2e1 in 40ms, sequenceid=42, compaction requested=false 2024-11-24T08:48:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5ca6d68759e1b6b2b436e1562757b2e1: 2024-11-24T08:48:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-24T08:48:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/d20251e96e17451c8f2aff1af52e5228 because midkey is the same as first or last row 2024-11-24T08:48:23,934 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:48:28,517 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5ca6d68759e1b6b2b436e1562757b2e1, had cached 0 bytes from a total of 40219 2024-11-24T08:48:30,663 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:48:30,665 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
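Each flush above is followed by a split check: the policy decides the region is big enough (sumSize=39.3 K against sizeToCheck=16.0 K here) but then refuses to split because the candidate file's midkey equals its first or last row, so no usable split point exists. A simplified, illustrative version of that check is sketched below; the method shapes and the use of raw byte[] keys are assumptions, not the actual ConstantSizeRegionSplitPolicy source.

    import java.util.Arrays;

    /** Illustrative split check: size gate first, then a usable midkey must exist. */
    class SimpleSplitCheck {
      /** True when the summed store file size exceeds the configured split size. */
      static boolean sizeSaysSplit(long sumStoreFileBytes, long splitSizeBytes) {
        return sumStoreFileBytes > splitSizeBytes; // e.g. 39.3 K > 16.0 K in the log above
      }

      /**
       * A midkey equal to the first or last key of the file cannot be used as a split
       * point: one daughter region would be empty, so the split is skipped.
       */
      static boolean midkeyUsable(byte[] firstKey, byte[] midKey, byte[] lastKey) {
        return midKey != null
            && !Arrays.equals(midKey, firstKey)
            && !Arrays.equals(midKey, lastKey);
      }

      static boolean shouldSplit(long sumStoreFileBytes, long splitSizeBytes,
                                 byte[] firstKey, byte[] midKey, byte[] lastKey) {
        return sizeSaysSplit(sumStoreFileBytes, splitSizeBytes)
            && midkeyUsable(firstKey, midKey, lastKey);
      }
    }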
2024-11-24T08:48:30,665 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:48:30,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:30,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:30,676 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
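The call stack above originates from the test's @After method, which shuts the mini cluster down through HBaseTestingUtil and thereby closes the shared async connection. A minimal sketch of that setup/teardown shape is shown below, assuming the JUnit 4 runner visible in the stack trace; the class name, field name and the @Before body are illustrative, not copied from AbstractTestLogRolling.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts an in-process HDFS + ZooKeeper + HBase cluster for the test.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared connection and stops master, region server and DFS,
        // which is what produces the "Shutting down minicluster" entries above.
        testUtil.shutdownMiniCluster();
      }
    }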
2024-11-24T08:48:30,676 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:48:30,676 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=856553958, stopped=false 2024-11-24T08:48:30,676 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=469387a2cdb6,37057,1732438014894 2024-11-24T08:48:30,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:30,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:30,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:30,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:30,677 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:48:30,678 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:48:30,678 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:48:30,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:30,678 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:30,678 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:30,678 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,42615,1732438015629' ***** 2024-11-24T08:48:30,678 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:48:30,679 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:48:30,679 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:48:30,679 INFO [RS:0;469387a2cdb6:42615 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:48:30,679 INFO [RS:0;469387a2cdb6:42615 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:48:30,679 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(3091): Received CLOSE for 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:48:30,680 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,42615,1732438015629 2024-11-24T08:48:30,680 INFO [RS:0;469387a2cdb6:42615 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:48:30,680 INFO [RS:0;469387a2cdb6:42615 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;469387a2cdb6:42615. 
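The ZooKeeper events above show shutdown being propagated by deleting the /hbase/running znode, which the master and region server watchers interpret as a cluster-wide stop signal; the region server then logs STOPPING. The sketch below illustrates that pattern with the plain ZooKeeper client API; the quorum address, session timeout and callback are assumptions for the example, and this is not HBase's ZKWatcher implementation.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    /** Illustrative watcher: treat deletion of a "running" znode as a shutdown request. */
    public class RunningZNodeWatcher implements Watcher {
      private static final String RUNNING_ZNODE = "/hbase/running";
      private final ZooKeeper zk;
      private final Runnable onShutdownRequested;

      public RunningZNodeWatcher(String quorum, Runnable onShutdownRequested) throws Exception {
        this.onShutdownRequested = onShutdownRequested;
        this.zk = new ZooKeeper(quorum, 30_000, this);
        this.zk.exists(RUNNING_ZNODE, true); // register a watch whether or not the node exists yet
      }

      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted
            && RUNNING_ZNODE.equals(event.getPath())) {
          onShutdownRequested.run(); // the znode was removed: begin an orderly stop
        }
        try {
          zk.exists(RUNNING_ZNODE, true); // re-arm the one-shot watch
        } catch (Exception ignored) {
          // the session is going away during shutdown; nothing further to do in this sketch
        }
      }
    }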
2024-11-24T08:48:30,680 DEBUG [RS:0;469387a2cdb6:42615 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:48:30,680 DEBUG [RS:0;469387a2cdb6:42615 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:30,680 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5ca6d68759e1b6b2b436e1562757b2e1, disabling compactions & flushes 2024-11-24T08:48:30,680 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:48:30,680 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:48:30,680 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:48:30,680 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:48:30,680 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:48:30,680 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. after waiting 0 ms 2024-11-24T08:48:30,680 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:48:30,680 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 
2024-11-24T08:48:30,681 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5ca6d68759e1b6b2b436e1562757b2e1 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-24T08:48:30,681 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T08:48:30,681 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:48:30,681 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1325): Online Regions={5ca6d68759e1b6b2b436e1562757b2e1=TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:48:30,681 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:48:30,681 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:48:30,681 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:48:30,681 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:48:30,681 DEBUG [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5ca6d68759e1b6b2b436e1562757b2e1 2024-11-24T08:48:30,681 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-24T08:48:30,686 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/e903e31a49aa473faa13e6b180167c48 is 1080, key is row0029/info:/1732438104646/Put/seqid=0 2024-11-24T08:48:30,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741850_1026 (size=8193) 2024-11-24T08:48:30,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741850_1026 (size=8193) 2024-11-24T08:48:30,693 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/e903e31a49aa473faa13e6b180167c48 2024-11-24T08:48:30,703 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/info/442aa7913d964609a8f9eb09a4a6ef2a is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1./info:regioninfo/1732438018540/Put/seqid=0 2024-11-24T08:48:30,704 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/.tmp/info/e903e31a49aa473faa13e6b180167c48 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/e903e31a49aa473faa13e6b180167c48 2024-11-24T08:48:30,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741851_1027 (size=7016) 2024-11-24T08:48:30,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741851_1027 (size=7016) 2024-11-24T08:48:30,714 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/info/442aa7913d964609a8f9eb09a4a6ef2a 2024-11-24T08:48:30,716 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/e903e31a49aa473faa13e6b180167c48, entries=3, sequenceid=48, filesize=8.0 K 2024-11-24T08:48:30,718 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 5ca6d68759e1b6b2b436e1562757b2e1 in 37ms, sequenceid=48, compaction requested=true 2024-11-24T08:48:30,718 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459, hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f44aea4a834648dd951be7648c685455, hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/3bb81156027a441e8f93ccf593b1d950] to archive 2024-11-24T08:48:30,721 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T08:48:30,725 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/095b7edac9a64209b1e465d4ca0be459 2024-11-24T08:48:30,726 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f44aea4a834648dd951be7648c685455 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/f44aea4a834648dd951be7648c685455 2024-11-24T08:48:30,728 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/3bb81156027a441e8f93ccf593b1d950 to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/info/3bb81156027a441e8f93ccf593b1d950 2024-11-24T08:48:30,737 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/ns/528be72e7d944bf5872f6a18d4048300 is 43, key is default/ns:d/1732438017822/Put/seqid=0 2024-11-24T08:48:30,739 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=469387a2cdb6:37057 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T08:48:30,740 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [095b7edac9a64209b1e465d4ca0be459=12509, f44aea4a834648dd951be7648c685455=12509, 3bb81156027a441e8f93ccf593b1d950=12509] 2024-11-24T08:48:30,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741852_1028 (size=5153) 2024-11-24T08:48:30,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741852_1028 (size=5153) 2024-11-24T08:48:30,744 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/ns/528be72e7d944bf5872f6a18d4048300 2024-11-24T08:48:30,746 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/default/TestLogRolling-testSlowSyncLogRolling/5ca6d68759e1b6b2b436e1562757b2e1/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-24T08:48:30,748 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 2024-11-24T08:48:30,749 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5ca6d68759e1b6b2b436e1562757b2e1: Waiting for close lock at 1732438110680Running coprocessor pre-close hooks at 1732438110680Disabling compacts and flushes for region at 1732438110680Disabling writes for close at 1732438110680Obtaining lock to block concurrent updates at 1732438110681 (+1 ms)Preparing flush snapshotting stores in 5ca6d68759e1b6b2b436e1562757b2e1 at 1732438110681Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732438110681Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. at 1732438110682 (+1 ms)Flushing 5ca6d68759e1b6b2b436e1562757b2e1/info: creating writer at 1732438110682Flushing 5ca6d68759e1b6b2b436e1562757b2e1/info: appending metadata at 1732438110685 (+3 ms)Flushing 5ca6d68759e1b6b2b436e1562757b2e1/info: closing flushed file at 1732438110685Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ca53a0f: reopening flushed file at 1732438110702 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 5ca6d68759e1b6b2b436e1562757b2e1 in 37ms, sequenceid=48, compaction requested=true at 1732438110718 (+16 ms)Writing region close event to WAL at 1732438110741 (+23 ms)Running coprocessor post-close hooks at 1732438110747 (+6 ms)Closed at 1732438110748 (+1 ms) 2024-11-24T08:48:30,749 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732438018083.5ca6d68759e1b6b2b436e1562757b2e1. 
2024-11-24T08:48:30,771 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/table/88a7334c14b64bc0b0280be9daeeeda3 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732438018557/Put/seqid=0 2024-11-24T08:48:30,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741853_1029 (size=5396) 2024-11-24T08:48:30,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741853_1029 (size=5396) 2024-11-24T08:48:30,777 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/table/88a7334c14b64bc0b0280be9daeeeda3 2024-11-24T08:48:30,785 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/info/442aa7913d964609a8f9eb09a4a6ef2a as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/info/442aa7913d964609a8f9eb09a4a6ef2a 2024-11-24T08:48:30,793 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/info/442aa7913d964609a8f9eb09a4a6ef2a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T08:48:30,795 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/ns/528be72e7d944bf5872f6a18d4048300 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/ns/528be72e7d944bf5872f6a18d4048300 2024-11-24T08:48:30,802 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/ns/528be72e7d944bf5872f6a18d4048300, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T08:48:30,803 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/.tmp/table/88a7334c14b64bc0b0280be9daeeeda3 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/table/88a7334c14b64bc0b0280be9daeeeda3 2024-11-24T08:48:30,811 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/table/88a7334c14b64bc0b0280be9daeeeda3, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T08:48:30,812 INFO 
[RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-11-24T08:48:30,818 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T08:48:30,819 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:48:30,819 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:48:30,820 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438110681Running coprocessor pre-close hooks at 1732438110681Disabling compacts and flushes for region at 1732438110681Disabling writes for close at 1732438110681Obtaining lock to block concurrent updates at 1732438110681Preparing flush snapshotting stores in 1588230740 at 1732438110681Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732438110682 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732438110682Flushing 1588230740/info: creating writer at 1732438110682Flushing 1588230740/info: appending metadata at 1732438110703 (+21 ms)Flushing 1588230740/info: closing flushed file at 1732438110703Flushing 1588230740/ns: creating writer at 1732438110722 (+19 ms)Flushing 1588230740/ns: appending metadata at 1732438110736 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732438110736Flushing 1588230740/table: creating writer at 1732438110752 (+16 ms)Flushing 1588230740/table: appending metadata at 1732438110770 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732438110770Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37b89bb9: reopening flushed file at 1732438110784 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bd6394e: reopening flushed file at 1732438110793 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32674c3d: reopening flushed file at 1732438110802 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1732438110812 (+10 ms)Writing region close event to WAL at 1732438110813 (+1 ms)Running coprocessor post-close hooks at 1732438110819 (+6 ms)Closed at 1732438110819 2024-11-24T08:48:30,820 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:48:30,881 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,42615,1732438015629; all regions closed. 
2024-11-24T08:48:30,883 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,884 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,884 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,884 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741834_1010 (size=3066) 2024-11-24T08:48:30,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741834_1010 (size=3066) 2024-11-24T08:48:30,894 DEBUG [RS:0;469387a2cdb6:42615 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs 2024-11-24T08:48:30,894 INFO [RS:0;469387a2cdb6:42615 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C42615%2C1732438015629.meta:.meta(num 1732438017679) 2024-11-24T08:48:30,895 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,895 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,895 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,896 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,896 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:30,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741847_1023 (size=12695) 2024-11-24T08:48:30,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741847_1023 (size=12695) 2024-11-24T08:48:30,902 DEBUG [RS:0;469387a2cdb6:42615 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/oldWALs 2024-11-24T08:48:30,902 INFO [RS:0;469387a2cdb6:42615 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C42615%2C1732438015629:(num 1732438090600) 2024-11-24T08:48:30,902 DEBUG [RS:0;469387a2cdb6:42615 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:30,902 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:48:30,902 INFO [RS:0;469387a2cdb6:42615 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:48:30,902 INFO [RS:0;469387a2cdb6:42615 {}] hbase.ChoreService(370): Chore service for: regionserver/469387a2cdb6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:48:30,903 INFO [RS:0;469387a2cdb6:42615 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:48:30,903 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:48:30,903 INFO [RS:0;469387a2cdb6:42615 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42615 2024-11-24T08:48:30,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,42615,1732438015629 2024-11-24T08:48:30,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:48:30,906 INFO [RS:0;469387a2cdb6:42615 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:48:30,907 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,42615,1732438015629] 2024-11-24T08:48:30,907 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,42615,1732438015629 already deleted, retry=false 2024-11-24T08:48:30,908 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,42615,1732438015629 expired; onlineServers=0 2024-11-24T08:48:30,908 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '469387a2cdb6,37057,1732438014894' ***** 2024-11-24T08:48:30,908 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:48:30,908 INFO [M:0;469387a2cdb6:37057 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:48:30,908 INFO [M:0;469387a2cdb6:37057 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:48:30,908 DEBUG [M:0;469387a2cdb6:37057 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:48:30,908 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:48:30,908 DEBUG [M:0;469387a2cdb6:37057 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:48:30,908 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438016804 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438016804,5,FailOnTimeoutGroup] 2024-11-24T08:48:30,908 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438016809 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438016809,5,FailOnTimeoutGroup] 2024-11-24T08:48:30,909 INFO [M:0;469387a2cdb6:37057 {}] hbase.ChoreService(370): Chore service for: master/469387a2cdb6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:48:30,909 INFO [M:0;469387a2cdb6:37057 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:48:30,909 DEBUG [M:0;469387a2cdb6:37057 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:48:30,909 INFO [M:0;469387a2cdb6:37057 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:48:30,909 INFO [M:0;469387a2cdb6:37057 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:48:30,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:48:30,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:30,909 INFO [M:0;469387a2cdb6:37057 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:48:30,910 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-24T08:48:30,910 DEBUG [M:0;469387a2cdb6:37057 {}] zookeeper.ZKUtil(347): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:48:30,910 WARN [M:0;469387a2cdb6:37057 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:48:30,911 INFO [M:0;469387a2cdb6:37057 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/.lastflushedseqids 2024-11-24T08:48:30,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741854_1030 (size=130) 2024-11-24T08:48:30,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741854_1030 (size=130) 2024-11-24T08:48:30,923 INFO [M:0;469387a2cdb6:37057 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:48:30,923 INFO [M:0;469387a2cdb6:37057 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:48:30,923 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:48:30,923 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:30,923 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:30,924 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:48:30,924 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:48:30,924 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-24T08:48:30,942 DEBUG [M:0;469387a2cdb6:37057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/58cacb1d46d74c6c94c5b31239bb3618 is 82, key is hbase:meta,,1/info:regioninfo/1732438017750/Put/seqid=0 2024-11-24T08:48:30,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741855_1031 (size=5672) 2024-11-24T08:48:30,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741855_1031 (size=5672) 2024-11-24T08:48:30,949 INFO [M:0;469387a2cdb6:37057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/58cacb1d46d74c6c94c5b31239bb3618 2024-11-24T08:48:30,971 DEBUG [M:0;469387a2cdb6:37057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc8cd9b996e24feca371964b8cceafa8 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732438018564/Put/seqid=0 2024-11-24T08:48:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741856_1032 (size=6248) 2024-11-24T08:48:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741856_1032 (size=6248) 2024-11-24T08:48:30,977 INFO [M:0;469387a2cdb6:37057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc8cd9b996e24feca371964b8cceafa8 2024-11-24T08:48:30,983 INFO [M:0;469387a2cdb6:37057 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dc8cd9b996e24feca371964b8cceafa8 2024-11-24T08:48:30,999 DEBUG [M:0;469387a2cdb6:37057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ad718b4910346bbac2d0d327838c069 is 69, key is 469387a2cdb6,42615,1732438015629/rs:state/1732438016936/Put/seqid=0 2024-11-24T08:48:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741857_1033 (size=5156) 2024-11-24T08:48:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741857_1033 (size=5156) 2024-11-24T08:48:31,005 INFO [M:0;469387a2cdb6:37057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ad718b4910346bbac2d0d327838c069 2024-11-24T08:48:31,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:31,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42615-0x10070e9949f0001, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:31,008 INFO [RS:0;469387a2cdb6:42615 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:48:31,008 INFO [RS:0;469387a2cdb6:42615 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,42615,1732438015629; zookeeper connection closed. 2024-11-24T08:48:31,009 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@32eaff10 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@32eaff10 2024-11-24T08:48:31,009 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:48:31,018 INFO [regionserver/469387a2cdb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:48:31,028 DEBUG [M:0;469387a2cdb6:37057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ea2fd1aafed2426a904779af7f24f242 is 52, key is load_balancer_on/state:d/1732438018062/Put/seqid=0 2024-11-24T08:48:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741858_1034 (size=5056) 2024-11-24T08:48:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741858_1034 (size=5056) 2024-11-24T08:48:31,035 INFO [M:0;469387a2cdb6:37057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ea2fd1aafed2426a904779af7f24f242 2024-11-24T08:48:31,044 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/58cacb1d46d74c6c94c5b31239bb3618 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/58cacb1d46d74c6c94c5b31239bb3618 2024-11-24T08:48:31,050 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/58cacb1d46d74c6c94c5b31239bb3618, entries=8, sequenceid=59, filesize=5.5 K 2024-11-24T08:48:31,052 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc8cd9b996e24feca371964b8cceafa8 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc8cd9b996e24feca371964b8cceafa8 2024-11-24T08:48:31,058 INFO [M:0;469387a2cdb6:37057 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dc8cd9b996e24feca371964b8cceafa8 2024-11-24T08:48:31,059 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc8cd9b996e24feca371964b8cceafa8, entries=6, sequenceid=59, filesize=6.1 K 2024-11-24T08:48:31,060 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ad718b4910346bbac2d0d327838c069 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1ad718b4910346bbac2d0d327838c069 2024-11-24T08:48:31,066 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1ad718b4910346bbac2d0d327838c069, entries=1, sequenceid=59, filesize=5.0 K 2024-11-24T08:48:31,067 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ea2fd1aafed2426a904779af7f24f242 as hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ea2fd1aafed2426a904779af7f24f242 2024-11-24T08:48:31,074 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ea2fd1aafed2426a904779af7f24f242, entries=1, sequenceid=59, filesize=4.9 K 2024-11-24T08:48:31,075 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=59, compaction requested=false 2024-11-24T08:48:31,077 INFO [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:48:31,077 DEBUG [M:0;469387a2cdb6:37057 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438110923Disabling compacts and flushes for region at 1732438110923Disabling writes for close at 1732438110924 (+1 ms)Obtaining lock to block concurrent updates at 1732438110924Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732438110924Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1732438110925 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732438110925Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732438110926 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732438110942 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732438110942Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732438110955 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732438110970 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732438110970Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732438110983 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732438110998 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732438110998Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732438111012 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732438111027 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732438111027Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37c54bca: reopening flushed file at 1732438111042 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28744494: reopening flushed file at 1732438111050 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f9075: reopening flushed file at 1732438111059 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@217df2b: reopening flushed file at 1732438111066 (+7 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=59, compaction requested=false at 1732438111075 (+9 ms)Writing region close event to WAL at 1732438111077 (+2 ms)Closed at 1732438111077 2024-11-24T08:48:31,078 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:31,078 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:31,078 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:31,078 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:31,079 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:31,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39953 is added to blk_1073741830_1006 (size=27985) 2024-11-24T08:48:31,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38515 is added to blk_1073741830_1006 (size=27985) 2024-11-24T08:48:31,082 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:48:31,082 INFO [M:0;469387a2cdb6:37057 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T08:48:31,082 INFO [M:0;469387a2cdb6:37057 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37057 2024-11-24T08:48:31,082 INFO [M:0;469387a2cdb6:37057 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:48:31,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:31,184 INFO [M:0;469387a2cdb6:37057 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:48:31,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37057-0x10070e9949f0000, quorum=127.0.0.1:62562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:31,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78be0d39{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:31,197 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@617aa169{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:31,197 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:31,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e06ea5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:31,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1612a852{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:31,201 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:48:31,201 WARN [BP-291149696-172.17.0.2-1732438011710 heartbeating to localhost/127.0.0.1:34511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:31,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:31,201 WARN [BP-291149696-172.17.0.2-1732438011710 heartbeating to localhost/127.0.0.1:34511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-291149696-172.17.0.2-1732438011710 (Datanode Uuid ae1c979a-d3d3-4e8e-904b-2aa3bd1750ae) service to localhost/127.0.0.1:34511 2024-11-24T08:48:31,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data3/current/BP-291149696-172.17.0.2-1732438011710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:31,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data4/current/BP-291149696-172.17.0.2-1732438011710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:31,204 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:31,207 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32c41a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:31,207 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21c64e78{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:31,207 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:31,207 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198fe7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:31,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@616d254c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:31,209 WARN [BP-291149696-172.17.0.2-1732438011710 heartbeating to localhost/127.0.0.1:34511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:31,209 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:48:31,209 WARN [BP-291149696-172.17.0.2-1732438011710 heartbeating to localhost/127.0.0.1:34511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-291149696-172.17.0.2-1732438011710 (Datanode Uuid a8d1a49c-f448-4e92-a06f-c45124bf4596) service to localhost/127.0.0.1:34511 2024-11-24T08:48:31,209 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:31,210 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data1/current/BP-291149696-172.17.0.2-1732438011710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:31,210 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/cluster_c6eb8802-84f1-9b1f-e14f-989c3f59ec7e/data/data2/current/BP-291149696-172.17.0.2-1732438011710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:31,211 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:31,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f961078{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:48:31,226 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:31,226 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:31,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:31,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:31,234 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:48:31,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:48:31,274 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/469387a2cdb6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34511 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34511 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@59b906bd java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/469387a2cdb6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34511 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:34511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/469387a2cdb6:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=131 (was 347), ProcessCount=11 (was 11), AvailableMemoryMB=2262 (was 2770) 2024-11-24T08:48:31,280 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=131, ProcessCount=11, AvailableMemoryMB=2261 2024-11-24T08:48:31,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:48:31,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.log.dir so I do NOT create it in target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11 2024-11-24T08:48:31,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/de4d73b8-d379-d8e5-045d-aae240589ff6/hadoop.tmp.dir so I do NOT create it in target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11 2024-11-24T08:48:31,281 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83, deleteOnExit=true 2024-11-24T08:48:31,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:48:31,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/test.cache.data in system properties and HBase conf 2024-11-24T08:48:31,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:48:31,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:48:31,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:48:31,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:48:31,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:48:31,282 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:48:31,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:48:31,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:48:31,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:48:31,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:48:31,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:48:31,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:48:31,298 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:48:31,352 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:31,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:31,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:31,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:31,360 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:48:31,361 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:31,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20a8e12e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:31,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43d42cad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:31,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d2990b6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/java.io.tmpdir/jetty-localhost-46375-hadoop-hdfs-3_4_1-tests_jar-_-any-15844877862332378894/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:48:31,457 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c65b16c{HTTP/1.1, (http/1.1)}{localhost:46375} 2024-11-24T08:48:31,457 INFO [Time-limited test {}] server.Server(415): Started @102074ms 2024-11-24T08:48:31,469 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:48:31,525 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:31,529 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:31,530 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:31,530 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:31,530 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:48:31,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77123380{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:31,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10608edc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:31,627 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25a1dc3a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/java.io.tmpdir/jetty-localhost-36881-hadoop-hdfs-3_4_1-tests_jar-_-any-11884663647973055005/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:31,628 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b0f1483{HTTP/1.1, (http/1.1)}{localhost:36881} 2024-11-24T08:48:31,628 INFO [Time-limited test {}] server.Server(415): Started @102245ms 2024-11-24T08:48:31,629 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:48:31,665 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:31,670 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:31,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:31,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:31,672 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:48:31,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a6fded0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:31,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a6a53c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:31,710 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data1/current/BP-2050211809-172.17.0.2-1732438111309/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:31,711 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data2/current/BP-2050211809-172.17.0.2-1732438111309/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:31,727 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:48:31,730 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x30e47f25b332061d with lease ID 0x3c638d26c2ef2442: Processing first storage report for DS-3c113d60-670b-4e59-ba98-acc65edc6fee from datanode DatanodeRegistration(127.0.0.1:46763, datanodeUuid=e3b60a79-564c-4b0d-9527-2f7d7d43964b, infoPort=41621, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309) 2024-11-24T08:48:31,730 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30e47f25b332061d with lease ID 0x3c638d26c2ef2442: from storage DS-3c113d60-670b-4e59-ba98-acc65edc6fee node DatanodeRegistration(127.0.0.1:46763, datanodeUuid=e3b60a79-564c-4b0d-9527-2f7d7d43964b, infoPort=41621, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:31,730 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x30e47f25b332061d with lease ID 0x3c638d26c2ef2442: Processing first storage report for DS-f9981001-0800-4bdc-9bf5-08916e986ec9 from datanode DatanodeRegistration(127.0.0.1:46763, datanodeUuid=e3b60a79-564c-4b0d-9527-2f7d7d43964b, infoPort=41621, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309) 2024-11-24T08:48:31,730 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30e47f25b332061d with lease ID 0x3c638d26c2ef2442: from storage DS-f9981001-0800-4bdc-9bf5-08916e986ec9 node DatanodeRegistration(127.0.0.1:46763, datanodeUuid=e3b60a79-564c-4b0d-9527-2f7d7d43964b, infoPort=41621, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:31,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ee81724{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/java.io.tmpdir/jetty-localhost-34579-hadoop-hdfs-3_4_1-tests_jar-_-any-8303510800898484344/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:31,778 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bb27f40{HTTP/1.1, (http/1.1)}{localhost:34579} 2024-11-24T08:48:31,779 INFO [Time-limited test {}] server.Server(415): Started @102395ms 2024-11-24T08:48:31,780 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:48:31,839 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data3/current/BP-2050211809-172.17.0.2-1732438111309/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:31,839 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data4/current/BP-2050211809-172.17.0.2-1732438111309/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:31,855 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:48:31,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bb4a428fdd2b28d with lease ID 0x3c638d26c2ef2443: Processing first storage report for DS-46838b8d-1c51-467c-a865-88a84386f245 from datanode DatanodeRegistration(127.0.0.1:32903, datanodeUuid=91c823e2-1b3b-4526-b4f4-c8c204bbf6f3, infoPort=40963, infoSecurePort=0, ipcPort=45865, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309) 2024-11-24T08:48:31,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bb4a428fdd2b28d with lease ID 0x3c638d26c2ef2443: from storage DS-46838b8d-1c51-467c-a865-88a84386f245 node DatanodeRegistration(127.0.0.1:32903, datanodeUuid=91c823e2-1b3b-4526-b4f4-c8c204bbf6f3, infoPort=40963, infoSecurePort=0, ipcPort=45865, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:31,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bb4a428fdd2b28d with lease ID 0x3c638d26c2ef2443: Processing first storage report for DS-5b894138-ac6e-461c-8ec2-01419a1d6e70 from datanode DatanodeRegistration(127.0.0.1:32903, datanodeUuid=91c823e2-1b3b-4526-b4f4-c8c204bbf6f3, infoPort=40963, infoSecurePort=0, ipcPort=45865, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309) 2024-11-24T08:48:31,858 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bb4a428fdd2b28d with lease ID 0x3c638d26c2ef2443: from storage DS-5b894138-ac6e-461c-8ec2-01419a1d6e70 node DatanodeRegistration(127.0.0.1:32903, datanodeUuid=91c823e2-1b3b-4526-b4f4-c8c204bbf6f3, infoPort=40963, infoSecurePort=0, ipcPort=45865, storageInfo=lv=-57;cid=testClusterID;nsid=713926287;c=1732438111309), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:31,910 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11 2024-11-24T08:48:31,916 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/zookeeper_0, clientPort=65091, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:48:31,917 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65091 2024-11-24T08:48:31,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:31,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:31,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:48:31,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:48:31,931 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb with version=8 2024-11-24T08:48:31,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase-staging 2024-11-24T08:48:31,933 INFO [Time-limited test {}] client.ConnectionUtils(128): master/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:48:31,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:31,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:31,933 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:48:31,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:31,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:48:31,933 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:48:31,934 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:48:31,934 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41373 2024-11-24T08:48:31,936 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41373 connecting to ZooKeeper ensemble=127.0.0.1:65091 2024-11-24T08:48:31,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413730x0, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:48:31,941 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41373-0x10070eb13190000 connected 2024-11-24T08:48:31,952 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:31,954 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:31,957 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:31,957 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb, hbase.cluster.distributed=false 2024-11-24T08:48:31,959 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:48:31,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41373 2024-11-24T08:48:31,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41373 2024-11-24T08:48:31,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41373 2024-11-24T08:48:31,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41373 2024-11-24T08:48:31,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41373 2024-11-24T08:48:31,981 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:48:31,981 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:31,981 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:31,981 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:48:31,981 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:31,981 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:48:31,981 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:48:31,981 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:48:31,982 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42783 2024-11-24T08:48:31,983 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42783 connecting to ZooKeeper ensemble=127.0.0.1:65091 2024-11-24T08:48:31,984 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:31,986 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:31,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427830x0, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:48:31,990 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:427830x0, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:31,990 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42783-0x10070eb13190001 connected 2024-11-24T08:48:31,991 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:48:31,991 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:48:31,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:48:31,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:48:31,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42783 2024-11-24T08:48:31,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42783 2024-11-24T08:48:32,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42783 2024-11-24T08:48:32,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42783 2024-11-24T08:48:32,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42783 2024-11-24T08:48:32,013 
DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;469387a2cdb6:41373 2024-11-24T08:48:32,013 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:32,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:32,015 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:48:32,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,017 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:48:32,017 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/469387a2cdb6,41373,1732438111933 from backup master directory 2024-11-24T08:48:32,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:32,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:32,018 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:48:32,018 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,023 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/hbase.id] with ID: 06cbc246-92d0-40f6-bed2-6317c9664ad0 2024-11-24T08:48:32,023 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/.tmp/hbase.id 2024-11-24T08:48:32,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:48:32,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:48:32,035 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/.tmp/hbase.id]:[hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/hbase.id] 2024-11-24T08:48:32,057 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:32,057 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:48:32,059 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-24T08:48:32,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:48:32,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:48:32,075 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:48:32,075 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:48:32,076 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:48:32,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:48:32,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:48:32,087 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store 2024-11-24T08:48:32,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:48:32,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:48:32,095 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:32,095 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:48:32,095 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:32,095 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:32,095 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:48:32,095 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:32,095 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:48:32,096 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438112095Disabling compacts and flushes for region at 1732438112095Disabling writes for close at 1732438112095Writing region close event to WAL at 1732438112095Closed at 1732438112095 2024-11-24T08:48:32,097 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/.initializing 2024-11-24T08:48:32,097 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/WALs/469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,101 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C41373%2C1732438111933, suffix=, logDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/WALs/469387a2cdb6,41373,1732438111933, archiveDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/oldWALs, maxLogs=10 2024-11-24T08:48:32,101 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C41373%2C1732438111933.1732438112101 2024-11-24T08:48:32,108 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/WALs/469387a2cdb6,41373,1732438111933/469387a2cdb6%2C41373%2C1732438111933.1732438112101 2024-11-24T08:48:32,109 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40963:40963),(127.0.0.1/127.0.0.1:41621:41621)] 2024-11-24T08:48:32,113 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:48:32,113 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:32,113 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,113 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:48:32,117 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:48:32,120 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:48:32,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:48:32,124 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:48:32,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:48:32,127 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:48:32,128 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,129 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,130 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,132 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,132 DEBUG [master/469387a2cdb6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,133 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:48:32,135 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:32,138 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:48:32,138 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813163, jitterRate=0.03399085998535156}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:48:32,140 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732438112114Initializing all the Stores at 1732438112115 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438112115Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438112115Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438112115Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438112115Cleaning up temporary data from old regions at 1732438112132 (+17 ms)Region opened successfully at 1732438112140 (+8 ms) 2024-11-24T08:48:32,141 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:48:32,146 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e8fa04f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:48:32,147 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:48:32,147 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:48:32,147 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:48:32,148 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:48:32,149 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:48:32,149 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:48:32,149 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:48:32,152 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:48:32,154 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:48:32,155 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:48:32,155 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:48:32,156 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:48:32,157 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:48:32,158 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:48:32,159 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:48:32,160 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:48:32,161 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:48:32,161 DEBUG 
[master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:48:32,164 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:48:32,164 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:48:32,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:32,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:32,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,166 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=469387a2cdb6,41373,1732438111933, sessionid=0x10070eb13190000, setting cluster-up flag (Was=false) 2024-11-24T08:48:32,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,172 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:48:32,173 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,179 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:48:32,180 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,181 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:48:32,183 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:32,183 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:48:32,183 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:48:32,184 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 469387a2cdb6,41373,1732438111933 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:48:32,185 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:32,185 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:32,185 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:32,186 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:32,186 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/469387a2cdb6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:48:32,186 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,186 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:48:32,186 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:48:32,186 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732438142186 2024-11-24T08:48:32,187 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:48:32,187 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:48:32,187 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:48:32,187 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:48:32,187 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:48:32,187 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:48:32,187 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,188 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:48:32,188 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:48:32,188 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:48:32,188 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:32,188 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:48:32,188 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:48:32,188 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:48:32,188 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438112188,5,FailOnTimeoutGroup] 2024-11-24T08:48:32,188 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438112188,5,FailOnTimeoutGroup] 2024-11-24T08:48:32,189 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,189 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:48:32,189 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,189 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,189 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,189 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:48:32,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:48:32,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:48:32,200 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:48:32,200 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb 2024-11-24T08:48:32,203 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(746): ClusterId : 06cbc246-92d0-40f6-bed2-6317c9664ad0 2024-11-24T08:48:32,204 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:48:32,206 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:48:32,206 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:48:32,208 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:48:32,208 DEBUG [RS:0;469387a2cdb6:42783 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53508d1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:48:32,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:48:32,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:48:32,214 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:32,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:48:32,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:48:32,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:48:32,222 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:48:32,223 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:48:32,224 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;469387a2cdb6:42783 2024-11-24T08:48:32,224 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:48:32,224 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:48:32,224 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(832): About to register with Master. 
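The descriptor printed above for hbase:meta (families info, ns, rep_barrier and table, all with DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', plus the table attribute 'hbase.store.file-tracker.impl' => 'DEFAULT') maps directly onto the public client builders. A minimal sketch follows, not taken from this test: the table name "demo" and the class name are placeholders, only the info family is shown, and the values simply mirror the ones logged above.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class DescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            // same table-level attribute as in the logged descriptor
            .setValue("hbase.store.file-tracker.impl", "DEFAULT")
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
      }
    }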
2024-11-24T08:48:32,225 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,41373,1732438111933 with port=42783, startcode=1732438111980 2024-11-24T08:48:32,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:48:32,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,226 DEBUG [RS:0;469387a2cdb6:42783 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:48:32,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:48:32,228 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:48:32,228 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:48:32,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740 2024-11-24T08:48:32,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740 2024-11-24T08:48:32,231 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36809, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:48:32,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:48:32,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:48:32,232 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41373 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,232 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41373 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,233 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:48:32,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:48:32,235 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb 2024-11-24T08:48:32,235 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35981 2024-11-24T08:48:32,235 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:48:32,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:48:32,238 DEBUG [RS:0;469387a2cdb6:42783 {}] zookeeper.ZKUtil(111): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,238 WARN [RS:0;469387a2cdb6:42783 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
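The NodeChildrenChanged events on /hbase/rs above are the usual one-shot ZooKeeper watch pattern: listing the children re-arms the watch, and the creation of a region server's ephemeral node fires it. A standalone sketch of that pattern with the plain ZooKeeper client is below; it is not HBase's ZKWatcher, the session timeout is arbitrary, and the quorum string is simply the one printed in the log.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsWatcherSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:65091", 30_000, (WatchedEvent event) -> {
          // Fires once per registration, e.g. when an RS ephemeral node is created.
          if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
            System.out.println("children changed under " + event.getPath());
          }
        });
        // getChildren(path, true) both lists the current region servers and
        // re-arms the watch, which is why the log shows repeated events.
        List<String> servers = zk.getChildren("/hbase/rs", true);
        System.out.println("live region servers: " + servers);
      }
    }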
2024-11-24T08:48:32,238 INFO [RS:0;469387a2cdb6:42783 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:48:32,238 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,42783,1732438111980] 2024-11-24T08:48:32,238 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/WALs/469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,238 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:48:32,239 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838875, jitterRate=0.06668564677238464}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:48:32,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732438112214Initializing all the Stores at 1732438112217 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438112217Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438112217Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438112217Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438112217Cleaning up temporary data from old regions at 1732438112232 (+15 ms)Region opened successfully at 1732438112240 (+8 ms) 2024-11-24T08:48:32,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:48:32,240 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:48:32,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:48:32,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:48:32,241 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:48:32,241 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:48:32,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438112240Disabling compacts and flushes for region at 1732438112240Disabling writes for close at 1732438112240Writing region close event to WAL at 1732438112241 (+1 ms)Closed at 1732438112241 2024-11-24T08:48:32,243 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:48:32,243 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:32,243 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:48:32,243 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:48:32,245 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:48:32,246 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:48:32,247 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:48:32,249 INFO [RS:0;469387a2cdb6:42783 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:48:32,249 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,253 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:48:32,254 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:48:32,254 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
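A small aside on the MemStoreFlusher figures above: globalMemStoreLimit=880 M with a low mark of 836 M is consistent with a lower-mark fraction of 0.95, presumably the default (an assumption, not something this log states). The sketch below only reproduces that arithmetic; no HBase API is involved.

    public class MemStoreLimitsSketch {
      public static void main(String[] args) {
        // Figures copied from the log: limit 880 M, low mark 836 M.
        // 0.95 is an assumed default fraction; 880 * 0.95 = 836, which matches.
        final long limitMb = 880;
        final double lowMarkFraction = 0.95;
        System.out.println("low mark ~= " + Math.round(limitMb * lowMarkFraction) + " M");
      }
    }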
2024-11-24T08:48:32,254 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,254 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,254 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:48:32,255 DEBUG [RS:0;469387a2cdb6:42783 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:48:32,256 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,256 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,256 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,256 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
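The executor.ExecutorService entries above each describe a named thread pool with a fixed corePoolSize/maxPoolSize pair. In plain JDK terms that corresponds to a ThreadPoolExecutor sized the same way; the sketch below is only an illustration of what those two numbers mean, not HBase's own wrapper.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
      public static void main(String[] args) throws Exception {
        // Mirrors an entry such as: name=RS_SNAPSHOT_OPERATIONS-..., corePoolSize=3, maxPoolSize=3
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3,                      // core == max, as in most of the logged pools
            60L, TimeUnit.SECONDS,     // keep-alive for idle threads (arbitrary here)
            new LinkedBlockingQueue<>());
        pool.submit(() -> System.out.println("handler running"));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
      }
    }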
2024-11-24T08:48:32,256 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,256 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,42783,1732438111980-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:48:32,272 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:48:32,272 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,42783,1732438111980-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,272 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,272 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.Replication(171): 469387a2cdb6,42783,1732438111980 started 2024-11-24T08:48:32,287 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,288 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,42783,1732438111980, RpcServer on 469387a2cdb6/172.17.0.2:42783, sessionid=0x10070eb13190001 2024-11-24T08:48:32,288 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:48:32,288 DEBUG [RS:0;469387a2cdb6:42783 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,288 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,42783,1732438111980' 2024-11-24T08:48:32,288 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:48:32,289 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:48:32,289 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:48:32,289 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:48:32,289 DEBUG [RS:0;469387a2cdb6:42783 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,289 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,42783,1732438111980' 2024-11-24T08:48:32,289 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:48:32,290 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:48:32,290 DEBUG [RS:0;469387a2cdb6:42783 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:48:32,290 INFO [RS:0;469387a2cdb6:42783 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:48:32,290 INFO [RS:0;469387a2cdb6:42783 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T08:48:32,394 INFO [RS:0;469387a2cdb6:42783 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C42783%2C1732438111980, suffix=, logDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/WALs/469387a2cdb6,42783,1732438111980, archiveDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/oldWALs, maxLogs=32 2024-11-24T08:48:32,397 WARN [469387a2cdb6:41373 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:48:32,397 INFO [RS:0;469387a2cdb6:42783 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42783%2C1732438111980.1732438112397 2024-11-24T08:48:32,405 INFO [RS:0;469387a2cdb6:42783 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/WALs/469387a2cdb6,42783,1732438111980/469387a2cdb6%2C42783%2C1732438111980.1732438112397 2024-11-24T08:48:32,407 DEBUG [RS:0;469387a2cdb6:42783 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40963:40963),(127.0.0.1/127.0.0.1:41621:41621)] 2024-11-24T08:48:32,648 DEBUG [469387a2cdb6:41373 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:48:32,649 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,654 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,42783,1732438111980, state=OPENING 2024-11-24T08:48:32,656 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:48:32,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,660 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:48:32,660 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:32,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,42783,1732438111980}] 2024-11-24T08:48:32,660 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:32,815 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:48:32,820 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40567, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:48:32,828 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:48:32,829 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:48:32,831 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C42783%2C1732438111980.meta, suffix=.meta, logDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/WALs/469387a2cdb6,42783,1732438111980, archiveDir=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/oldWALs, maxLogs=32 2024-11-24T08:48:32,833 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C42783%2C1732438111980.meta.1732438112833.meta 2024-11-24T08:48:32,839 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/WALs/469387a2cdb6,42783,1732438111980/469387a2cdb6%2C42783%2C1732438111980.meta.1732438112833.meta 2024-11-24T08:48:32,841 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40963:40963),(127.0.0.1/127.0.0.1:41621:41621)] 2024-11-24T08:48:32,841 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:48:32,842 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:48:32,842 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:48:32,842 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
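The WAL configuration line above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) reflects the usual sizing rule: the roll size is the block size multiplied by a roll fraction, here 256 MB x 0.5 = 128 MB. The sketch below just reads such settings back from a client Configuration; the key names are quoted from memory and should be treated as assumptions, and the defaults passed in simply mirror the logged values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key names are assumptions (not printed in this log); defaults mirror the logged values.
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        System.out.println("roll size ~= " + (long) (blockSize * multiplier)
            + " bytes, maxLogs=" + maxLogs);
      }
    }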
2024-11-24T08:48:32,842 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:48:32,842 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:32,842 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:48:32,842 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:48:32,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:48:32,845 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:48:32,845 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:48:32,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:48:32,847 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:48:32,848 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:48:32,848 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:32,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:48:32,850 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:48:32,850 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:32,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:48:32,850 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:48:32,851 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740 2024-11-24T08:48:32,852 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740 2024-11-24T08:48:32,854 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:48:32,854 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:48:32,855 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:48:32,856 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:48:32,857 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803472, jitterRate=0.021668165922164917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:48:32,858 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:48:32,859 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732438112842Writing region info on filesystem at 1732438112843 (+1 ms)Initializing all the Stores at 1732438112844 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438112844Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438112844Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438112844Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438112844Cleaning up temporary data from old regions at 1732438112854 (+10 ms)Running coprocessor post-open hooks at 1732438112858 (+4 ms)Region opened successfully at 1732438112859 (+1 ms) 2024-11-24T08:48:32,860 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732438112815 2024-11-24T08:48:32,863 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:48:32,863 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:48:32,864 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,866 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,42783,1732438111980, state=OPEN 2024-11-24T08:48:32,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:48:32,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:48:32,868 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,868 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:32,868 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:32,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:48:32,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,42783,1732438111980 in 208 msec 2024-11-24T08:48:32,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:48:32,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 629 msec 2024-11-24T08:48:32,876 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:32,876 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:48:32,877 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:48:32,877 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,42783,1732438111980, seqNum=-1] 2024-11-24T08:48:32,878 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:48:32,879 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59339, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:48:32,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 702 msec 2024-11-24T08:48:32,886 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732438112886, completionTime=-1 2024-11-24T08:48:32,886 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:48:32,886 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:48:32,888 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:48:32,888 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732438172888 2024-11-24T08:48:32,888 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732438232888 2024-11-24T08:48:32,888 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T08:48:32,889 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41373,1732438111933-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,889 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41373,1732438111933-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,889 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41373,1732438111933-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,889 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-469387a2cdb6:41373, period=300000, unit=MILLISECONDS is enabled. 
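Once InitMetaProcedure finishes, the PEWorker fetches the hbase:meta location from the connection registry, essentially the same lookup a client performs. A client-side sketch of that lookup is below; the quorum value is copied from this log, and in a real deployment it would normally come from hbase-site.xml rather than being set in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:65091"); // quorum value from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Comparable to the logged line: region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1
            System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
          }
        }
      }
    }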
2024-11-24T08:48:32,889 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,889 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:32,891 DEBUG [master/469387a2cdb6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.876sec 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41373,1732438111933-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:48:32,894 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41373,1732438111933-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:48:32,897 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:48:32,897 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:48:32,897 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41373,1732438111933-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
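The ChoreService entries above register the master's periodic maintenance chores (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, FlushedSequenceIdFlusher, MobFileCleanerChore, and so on), each with a name, period, and time unit. As a rough illustration of that mechanism, the sketch below defines and schedules a custom chore. ChoreService, ScheduledChore and Stoppable are internal HBase classes; the constructor signatures used here reflect my understanding of them and should be treated as an assumption, and the chore name and period are invented for the example.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreServiceSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore has a shutdown flag to consult.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Hypothetical chore; master chores like BalancerChore follow the same pattern.
    ScheduledChore exampleChore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService choreService = new ChoreService("example");
    choreService.scheduleChore(exampleChore);

    Thread.sleep(3_000);      // let it tick a few times
    stopper.stop("done");
    choreService.shutdown();
  }
}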
2024-11-24T08:48:32,904 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d7283c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:48:32,904 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 469387a2cdb6,41373,-1 for getting cluster id 2024-11-24T08:48:32,904 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:48:32,906 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '06cbc246-92d0-40f6-bed2-6317c9664ad0' 2024-11-24T08:48:32,906 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:48:32,906 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "06cbc246-92d0-40f6-bed2-6317c9664ad0" 2024-11-24T08:48:32,907 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7167d31c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:48:32,907 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [469387a2cdb6,41373,-1] 2024-11-24T08:48:32,907 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:48:32,907 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:32,909 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55234, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:48:32,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d6631e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:48:32,910 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:48:32,912 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,42783,1732438111980, seqNum=-1] 2024-11-24T08:48:32,912 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:48:32,914 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:48:32,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:32,920 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:48:32,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:48:32,920 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:48:32,921 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:48:32,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:32,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:32,921 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:48:32,921 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:48:32,921 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=117325850, stopped=false 2024-11-24T08:48:32,922 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=469387a2cdb6,41373,1732438111933 2024-11-24T08:48:32,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:32,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:32,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:32,923 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:48:32,923 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
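The call stack above shows where this shutdown originates: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection and then stops the JVM-local cluster. A minimal JUnit 4 sketch of that start/stop pattern follows. Only startMiniCluster()/shutdownMiniCluster() and the HBaseTestingUtil class name are taken from the log; the no-arg constructor is assumed to behave like the older HBaseTestingUtility, and the test body is a placeholder.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up ZooKeeper, HDFS and HBase in-process, as in the log above.
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors AbstractTestLogRolling.tearDown() -> shutdownMiniCluster() in the stack trace.
    util.shutdownMiniCluster();
  }

  @Test
  public void placeholder() throws Exception {
    // Hypothetical test body; the real test exercises WAL rolling on datanode death.
  }
}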
2024-11-24T08:48:32,923 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:32,923 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:48:32,923 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:32,924 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:32,924 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,42783,1732438111980' ***** 2024-11-24T08:48:32,924 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:48:32,924 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:48:32,924 INFO [RS:0;469387a2cdb6:42783 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:48:32,924 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:48:32,924 INFO [RS:0;469387a2cdb6:42783 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:48:32,924 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,42783,1732438111980 2024-11-24T08:48:32,924 INFO [RS:0;469387a2cdb6:42783 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:48:32,924 INFO [RS:0;469387a2cdb6:42783 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;469387a2cdb6:42783. 2024-11-24T08:48:32,925 DEBUG [RS:0;469387a2cdb6:42783 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:48:32,925 DEBUG [RS:0;469387a2cdb6:42783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:32,925 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
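Several entries above are AsyncConnectionImpl logging who closed it and from which call site. From an application's point of view the corresponding lifecycle is simply create, use, close; the sketch below shows that with the public async client API. The row key is hypothetical, and closing the connection is what produces the "Connection has been closed by ..." DEBUG entries seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public final class AsyncConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources closes the connection on exit.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      Result result = conn.getTable(TableName.META_TABLE_NAME)
          .get(new Get(Bytes.toBytes("hypothetical-row")))   // made-up row key
          .get();
      System.out.println("empty=" + result.isEmpty());
    }
  }
}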
2024-11-24T08:48:32,925 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:48:32,925 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:48:32,925 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:48:32,925 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T08:48:32,925 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:48:32,925 DEBUG [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T08:48:32,925 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:48:32,925 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:48:32,926 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:48:32,926 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:48:32,926 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:48:32,926 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T08:48:32,948 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740/.tmp/ns/379a5221a9ef417190dc1a38db5e31db is 43, key is default/ns:d/1732438112880/Put/seqid=0 2024-11-24T08:48:32,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741835_1011 (size=5153) 2024-11-24T08:48:32,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741835_1011 (size=5153) 2024-11-24T08:48:32,954 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740/.tmp/ns/379a5221a9ef417190dc1a38db5e31db 2024-11-24T08:48:32,962 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740/.tmp/ns/379a5221a9ef417190dc1a38db5e31db as hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740/ns/379a5221a9ef417190dc1a38db5e31db 2024-11-24T08:48:32,969 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740/ns/379a5221a9ef417190dc1a38db5e31db, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T08:48:32,971 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-11-24T08:48:32,976 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T08:48:32,976 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:48:32,977 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:48:32,977 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438112925Running coprocessor pre-close hooks at 1732438112925Disabling compacts and flushes for region at 1732438112925Disabling writes for close at 1732438112926 (+1 ms)Obtaining lock to block concurrent updates at 1732438112926Preparing flush snapshotting stores in 1588230740 at 1732438112926Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732438112926Flushing stores of hbase:meta,,1.1588230740 at 1732438112927 (+1 ms)Flushing 1588230740/ns: creating writer at 1732438112927Flushing 1588230740/ns: appending metadata at 1732438112947 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1732438112947Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37042d2c: reopening flushed file at 1732438112961 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1732438112971 (+10 ms)Writing region close event to WAL at 1732438112971Running coprocessor post-close hooks at 1732438112976 (+5 ms)Closed at 1732438112977 (+1 ms) 2024-11-24T08:48:32,977 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:48:33,125 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,42783,1732438111980; all regions closed. 
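The close sequence above flushes the meta region's memstore (74 B in the ns family) into a temporary HFile, commits it under the store directory, and records every step in the region close journal. Outside of a shutdown, the same flush can be requested administratively; a minimal sketch, assuming a running cluster reachable through the local configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the serving region server to flush hbase:meta memstores to HFiles,
      // the same operation the close path performs in the entries above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}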
2024-11-24T08:48:33,126 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,126 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,127 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,127 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,127 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:48:33,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:48:33,132 DEBUG [RS:0;469387a2cdb6:42783 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/oldWALs 2024-11-24T08:48:33,133 INFO [RS:0;469387a2cdb6:42783 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C42783%2C1732438111980.meta:.meta(num 1732438112833) 2024-11-24T08:48:33,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,134 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,134 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:48:33,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:48:33,139 DEBUG [RS:0;469387a2cdb6:42783 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/oldWALs 2024-11-24T08:48:33,139 INFO [RS:0;469387a2cdb6:42783 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C42783%2C1732438111980:(num 1732438112397) 2024-11-24T08:48:33,139 DEBUG [RS:0;469387a2cdb6:42783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:33,139 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:48:33,139 INFO [RS:0;469387a2cdb6:42783 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:48:33,139 INFO [RS:0;469387a2cdb6:42783 {}] hbase.ChoreService(370): Chore service for: regionserver/469387a2cdb6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:48:33,139 INFO [RS:0;469387a2cdb6:42783 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:48:33,139 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
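The AbstractFSWAL entries above show both FSHLog WALs of this region server being closed and their finished files moved into oldWALs. During a test body, a WAL roll can also be requested explicitly through the Admin API; the sketch below issues that request. The server name string is copied from the log, but this illustrates the API call only and is not necessarily how TestLogRolling triggers its rolls.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Server name in host,port,startcode form, as logged above.
      ServerName rs = ServerName.valueOf("469387a2cdb6,42783,1732438111980");
      // Requests a WAL roll; the replaced file later ends up in oldWALs,
      // as the AbstractFSWAL entries show.
      admin.rollWALWriter(rs);
    }
  }
}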
2024-11-24T08:48:33,140 INFO [RS:0;469387a2cdb6:42783 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42783 2024-11-24T08:48:33,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:48:33,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,42783,1732438111980 2024-11-24T08:48:33,141 INFO [RS:0;469387a2cdb6:42783 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:48:33,142 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,42783,1732438111980] 2024-11-24T08:48:33,143 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,42783,1732438111980 already deleted, retry=false 2024-11-24T08:48:33,143 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,42783,1732438111980 expired; onlineServers=0 2024-11-24T08:48:33,143 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '469387a2cdb6,41373,1732438111933' ***** 2024-11-24T08:48:33,143 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:48:33,143 INFO [M:0;469387a2cdb6:41373 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:48:33,143 INFO [M:0;469387a2cdb6:41373 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:48:33,143 DEBUG [M:0;469387a2cdb6:41373 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:48:33,143 DEBUG [M:0;469387a2cdb6:41373 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:48:33,143 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:48:33,143 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438112188 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438112188,5,FailOnTimeoutGroup] 2024-11-24T08:48:33,143 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438112188 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438112188,5,FailOnTimeoutGroup] 2024-11-24T08:48:33,144 INFO [M:0;469387a2cdb6:41373 {}] hbase.ChoreService(370): Chore service for: master/469387a2cdb6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:48:33,144 INFO [M:0;469387a2cdb6:41373 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:48:33,144 DEBUG [M:0;469387a2cdb6:41373 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:48:33,144 INFO [M:0;469387a2cdb6:41373 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:48:33,144 INFO [M:0;469387a2cdb6:41373 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:48:33,144 INFO [M:0;469387a2cdb6:41373 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:48:33,144 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:48:33,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:48:33,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:33,145 DEBUG [M:0;469387a2cdb6:41373 {}] zookeeper.ZKUtil(347): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:48:33,145 WARN [M:0;469387a2cdb6:41373 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:48:33,145 INFO [M:0;469387a2cdb6:41373 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/.lastflushedseqids 2024-11-24T08:48:33,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741836_1012 (size=99) 2024-11-24T08:48:33,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741836_1012 (size=99) 2024-11-24T08:48:33,153 INFO [M:0;469387a2cdb6:41373 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:48:33,153 INFO [M:0;469387a2cdb6:41373 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:48:33,153 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:48:33,153 INFO [M:0;469387a2cdb6:41373 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:33,153 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:33,153 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:48:33,153 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:33,154 INFO [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T08:48:33,170 DEBUG [M:0;469387a2cdb6:41373 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a13afdbfafae43b0ad69662dea692ebf is 82, key is hbase:meta,,1/info:regioninfo/1732438112864/Put/seqid=0 2024-11-24T08:48:33,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741837_1013 (size=5672) 2024-11-24T08:48:33,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741837_1013 (size=5672) 2024-11-24T08:48:33,176 INFO [M:0;469387a2cdb6:41373 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a13afdbfafae43b0ad69662dea692ebf 2024-11-24T08:48:33,195 DEBUG [M:0;469387a2cdb6:41373 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cecbe878cd254218a6a23d56319364df is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732438112885/Put/seqid=0 2024-11-24T08:48:33,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741838_1014 (size=5275) 2024-11-24T08:48:33,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741838_1014 (size=5275) 2024-11-24T08:48:33,201 INFO [M:0;469387a2cdb6:41373 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cecbe878cd254218a6a23d56319364df 2024-11-24T08:48:33,224 DEBUG [M:0;469387a2cdb6:41373 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/38b104b39089498d8cd807e9d2bd1255 is 69, key is 469387a2cdb6,42783,1732438111980/rs:state/1732438112232/Put/seqid=0 2024-11-24T08:48:33,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741839_1015 (size=5156) 2024-11-24T08:48:33,229 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741839_1015 (size=5156) 2024-11-24T08:48:33,230 INFO [M:0;469387a2cdb6:41373 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/38b104b39089498d8cd807e9d2bd1255 2024-11-24T08:48:33,242 INFO [RS:0;469387a2cdb6:42783 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:48:33,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:33,242 INFO [RS:0;469387a2cdb6:42783 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,42783,1732438111980; zookeeper connection closed. 2024-11-24T08:48:33,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42783-0x10070eb13190001, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:33,243 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3db094f3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3db094f3 2024-11-24T08:48:33,243 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:48:33,251 DEBUG [M:0;469387a2cdb6:41373 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c57e1e454b84dc6b5566bb1cc22b2d9 is 52, key is load_balancer_on/state:d/1732438112919/Put/seqid=0 2024-11-24T08:48:33,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741840_1016 (size=5056) 2024-11-24T08:48:33,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741840_1016 (size=5056) 2024-11-24T08:48:33,257 INFO [M:0;469387a2cdb6:41373 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c57e1e454b84dc6b5566bb1cc22b2d9 2024-11-24T08:48:33,264 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a13afdbfafae43b0ad69662dea692ebf as hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a13afdbfafae43b0ad69662dea692ebf 2024-11-24T08:48:33,270 INFO [M:0;469387a2cdb6:41373 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a13afdbfafae43b0ad69662dea692ebf, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T08:48:33,271 DEBUG [M:0;469387a2cdb6:41373 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cecbe878cd254218a6a23d56319364df as hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cecbe878cd254218a6a23d56319364df 2024-11-24T08:48:33,277 INFO [M:0;469387a2cdb6:41373 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cecbe878cd254218a6a23d56319364df, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T08:48:33,279 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/38b104b39089498d8cd807e9d2bd1255 as hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/38b104b39089498d8cd807e9d2bd1255 2024-11-24T08:48:33,285 INFO [M:0;469387a2cdb6:41373 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/38b104b39089498d8cd807e9d2bd1255, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T08:48:33,286 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c57e1e454b84dc6b5566bb1cc22b2d9 as hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c57e1e454b84dc6b5566bb1cc22b2d9 2024-11-24T08:48:33,293 INFO [M:0;469387a2cdb6:41373 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/6d5446f7-299e-3068-e125-d6fcfa6436bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c57e1e454b84dc6b5566bb1cc22b2d9, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T08:48:33,295 INFO [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=29, compaction requested=false 2024-11-24T08:48:33,296 INFO [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:33,297 DEBUG [M:0;469387a2cdb6:41373 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438113153Disabling compacts and flushes for region at 1732438113153Disabling writes for close at 1732438113153Obtaining lock to block concurrent updates at 1732438113154 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732438113154Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732438113154Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732438113155 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732438113155Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732438113169 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732438113169Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732438113181 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732438113195 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732438113195Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732438113206 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732438113223 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732438113223Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732438113236 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732438113251 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732438113251Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b73aba0: reopening flushed file at 1732438113263 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34022fd2: reopening flushed file at 1732438113270 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79a56eb1: reopening flushed file at 1732438113278 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f530c9d: reopening flushed file at 1732438113285 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=29, compaction requested=false at 1732438113295 (+10 ms)Writing region close event to WAL at 1732438113296 (+1 ms)Closed at 1732438113296 2024-11-24T08:48:33,297 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,297 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,297 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,297 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,298 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:48:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32903 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:48:33,301 INFO [M:0;469387a2cdb6:41373 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T08:48:33,301 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:48:33,301 INFO [M:0;469387a2cdb6:41373 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41373 2024-11-24T08:48:33,301 INFO [M:0;469387a2cdb6:41373 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:48:33,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:33,403 INFO [M:0;469387a2cdb6:41373 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:48:33,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41373-0x10070eb13190000, quorum=127.0.0.1:65091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:48:33,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ee81724{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:33,410 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bb27f40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:33,410 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:33,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a6a53c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:33,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a6fded0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:33,414 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:48:33,414 WARN [BP-2050211809-172.17.0.2-1732438111309 heartbeating to localhost/127.0.0.1:35981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:33,414 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:33,414 WARN [BP-2050211809-172.17.0.2-1732438111309 heartbeating to localhost/127.0.0.1:35981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2050211809-172.17.0.2-1732438111309 (Datanode Uuid 91c823e2-1b3b-4526-b4f4-c8c204bbf6f3) service to localhost/127.0.0.1:35981 2024-11-24T08:48:33,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data3/current/BP-2050211809-172.17.0.2-1732438111309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:33,415 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data4/current/BP-2050211809-172.17.0.2-1732438111309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:33,415 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:33,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25a1dc3a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:33,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b0f1483{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:33,417 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:33,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10608edc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:33,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77123380{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:33,418 WARN [BP-2050211809-172.17.0.2-1732438111309 heartbeating to localhost/127.0.0.1:35981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:33,418 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
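The WARN/ERROR entries above are the first of the two MiniDFSCluster datanodes ending its block-pool service as the HDFS half of the minicluster shuts down. The test name in the earlier stack trace (testLogRollOnDatanodeDeath) implies stopping a datanode while the cluster is live; the sketch below outlines that, assuming the testing utility exposes the underlying MiniDFSCluster via getDFSCluster() and that stopDataNode/restartDataNode behave as in the Hadoop test APIs as I understand them — treat the method names as assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class DatanodeDeathSketch {
  // Illustrative only: stop one datanode of an already-running minicluster,
  // then bring it back after the WAL behaviour under failure has been exercised.
  static void killAndReviveOneDatanode(HBaseTestingUtil util) throws Exception {
    MiniDFSCluster dfs = util.getDFSCluster();                       // assumed accessor
    MiniDFSCluster.DataNodeProperties stopped = dfs.stopDataNode(0); // stop datanode 0
    // ... exercise WAL writes / rolling while one datanode is down ...
    dfs.restartDataNode(stopped, true);                              // restart, keeping the same port
  }
}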
2024-11-24T08:48:33,418 WARN [BP-2050211809-172.17.0.2-1732438111309 heartbeating to localhost/127.0.0.1:35981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2050211809-172.17.0.2-1732438111309 (Datanode Uuid e3b60a79-564c-4b0d-9527-2f7d7d43964b) service to localhost/127.0.0.1:35981 2024-11-24T08:48:33,418 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:33,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data1/current/BP-2050211809-172.17.0.2-1732438111309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:33,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/cluster_bb36238f-4bdc-3f2d-d6a0-b13055d59e83/data/data2/current/BP-2050211809-172.17.0.2-1732438111309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:33,419 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:33,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d2990b6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:48:33,425 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c65b16c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:33,425 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:33,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43d42cad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:33,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20a8e12e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:33,432 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:48:33,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:48:33,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:48:33,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.log.dir so I do NOT create it in target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137 2024-11-24T08:48:33,457 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7cb6c5d1-dff3-f528-a598-b81a8b700f11/hadoop.tmp.dir so I do NOT create it in target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137 2024-11-24T08:48:33,457 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198, deleteOnExit=true 2024-11-24T08:48:33,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:48:33,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/test.cache.data in system properties and HBase conf 2024-11-24T08:48:33,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:48:33,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:48:33,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:48:33,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:48:33,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:48:33,458 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T08:48:33,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:48:33,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:48:33,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:48:33,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:48:33,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:48:33,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:48:33,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:48:33,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:48:33,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:48:33,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:48:33,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:48:33,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:48:33,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:48:33,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:48:33,472 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:48:33,521 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:33,526 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:33,529 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:33,529 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:33,529 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:48:33,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:33,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29ce44bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:33,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9fcb45e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:33,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@410b0043{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir/jetty-localhost-33975-hadoop-hdfs-3_4_1-tests_jar-_-any-11387070091443865296/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:48:33,627 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63fdee04{HTTP/1.1, (http/1.1)}{localhost:33975} 2024-11-24T08:48:33,627 INFO [Time-limited test {}] server.Server(415): Started @104244ms 2024-11-24T08:48:33,639 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:48:33,690 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:33,693 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:33,694 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:33,694 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:33,694 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:48:33,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@500184cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:33,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@284f089c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:33,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6973f479{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir/jetty-localhost-46205-hadoop-hdfs-3_4_1-tests_jar-_-any-3926551735566918764/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:33,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@289e7e24{HTTP/1.1, (http/1.1)}{localhost:46205} 2024-11-24T08:48:33,789 INFO [Time-limited test {}] server.Server(415): Started @104406ms 2024-11-24T08:48:33,791 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:48:33,822 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:33,826 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:33,827 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:33,827 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:33,827 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:48:33,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30312faa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:33,828 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c52e06b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:33,861 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data1/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:33,861 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data2/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:33,884 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:48:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f231b0ad5c44d86 with lease ID 0x93b51bcf413bec79: Processing first storage report for DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850 from datanode DatanodeRegistration(127.0.0.1:37885, datanodeUuid=5a12bab1-d62c-4022-a015-3366aacc7760, infoPort=46357, infoSecurePort=0, ipcPort=42701, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f231b0ad5c44d86 with lease ID 0x93b51bcf413bec79: from storage DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850 node DatanodeRegistration(127.0.0.1:37885, datanodeUuid=5a12bab1-d62c-4022-a015-3366aacc7760, infoPort=46357, infoSecurePort=0, ipcPort=42701, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f231b0ad5c44d86 with lease ID 0x93b51bcf413bec79: Processing first storage report for DS-17d0fee8-513d-4960-b809-8cb286a91030 from datanode DatanodeRegistration(127.0.0.1:37885, datanodeUuid=5a12bab1-d62c-4022-a015-3366aacc7760, infoPort=46357, infoSecurePort=0, ipcPort=42701, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f231b0ad5c44d86 with lease ID 0x93b51bcf413bec79: from storage DS-17d0fee8-513d-4960-b809-8cb286a91030 node DatanodeRegistration(127.0.0.1:37885, datanodeUuid=5a12bab1-d62c-4022-a015-3366aacc7760, infoPort=46357, infoSecurePort=0, ipcPort=42701, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:33,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@fbfcb3a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir/jetty-localhost-37377-hadoop-hdfs-3_4_1-tests_jar-_-any-17517262169757132954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:33,947 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41d66623{HTTP/1.1, (http/1.1)}{localhost:37377} 2024-11-24T08:48:33,948 INFO [Time-limited test {}] server.Server(415): Started @104564ms 2024-11-24T08:48:33,950 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:48:34,038 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data3/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:34,039 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data4/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:34,074 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:48:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e888647b2361a04 with lease ID 0x93b51bcf413bec7a: Processing first storage report for DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe from datanode DatanodeRegistration(127.0.0.1:39427, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=35951, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e888647b2361a04 with lease ID 0x93b51bcf413bec7a: from storage DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe node DatanodeRegistration(127.0.0.1:39427, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=35951, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e888647b2361a04 with lease ID 0x93b51bcf413bec7a: Processing first storage report for DS-112cf404-b46d-44f1-b1e2-4efb69f5869e from datanode DatanodeRegistration(127.0.0.1:39427, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=35951, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e888647b2361a04 with lease ID 0x93b51bcf413bec7a: from storage DS-112cf404-b46d-44f1-b1e2-4efb69f5869e node DatanodeRegistration(127.0.0.1:39427, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=35951, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:34,098 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137 2024-11-24T08:48:34,110 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/zookeeper_0, clientPort=58471, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:48:34,113 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58471 2024-11-24T08:48:34,114 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:34,116 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:34,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:48:34,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:48:34,257 INFO [regionserver/469387a2cdb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:48:34,549 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77 with version=8 2024-11-24T08:48:34,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase-staging 2024-11-24T08:48:34,555 INFO [Time-limited test {}] client.ConnectionUtils(128): master/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:48:34,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:34,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:34,556 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:48:34,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:34,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:48:34,556 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 
2024-11-24T08:48:34,557 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:48:34,557 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39181 2024-11-24T08:48:34,559 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39181 connecting to ZooKeeper ensemble=127.0.0.1:58471 2024-11-24T08:48:34,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:391810x0, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:48:34,563 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39181-0x10070eb1ba40000 connected 2024-11-24T08:48:34,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:34,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:34,582 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:34,582 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77, hbase.cluster.distributed=false 2024-11-24T08:48:34,584 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:48:34,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39181 2024-11-24T08:48:34,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39181 2024-11-24T08:48:34,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39181 2024-11-24T08:48:34,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39181 2024-11-24T08:48:34,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39181 2024-11-24T08:48:34,603 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:48:34,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:34,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:34,604 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:48:34,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:34,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:48:34,604 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:48:34,604 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:48:34,604 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40901 2024-11-24T08:48:34,606 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40901 connecting to ZooKeeper ensemble=127.0.0.1:58471 2024-11-24T08:48:34,607 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:34,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:34,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:409010x0, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:48:34,612 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40901-0x10070eb1ba40001 connected 2024-11-24T08:48:34,612 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:48:34,613 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:48:34,613 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:48:34,614 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:48:34,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:48:34,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40901 2024-11-24T08:48:34,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40901 2024-11-24T08:48:34,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40901 2024-11-24T08:48:34,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40901 2024-11-24T08:48:34,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40901 2024-11-24T08:48:34,631 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;469387a2cdb6:39181 2024-11-24T08:48:34,632 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/469387a2cdb6,39181,1732438114554 2024-11-24T08:48:34,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:34,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:34,633 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/469387a2cdb6,39181,1732438114554 2024-11-24T08:48:34,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:48:34,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,635 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:48:34,635 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/469387a2cdb6,39181,1732438114554 from backup master directory 2024-11-24T08:48:34,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/469387a2cdb6,39181,1732438114554 2024-11-24T08:48:34,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:34,636 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:48:34,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:48:34,636 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=469387a2cdb6,39181,1732438114554 2024-11-24T08:48:34,640 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/hbase.id] with ID: bdd9c671-19da-42cd-a35a-fec164405f40 2024-11-24T08:48:34,640 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/.tmp/hbase.id 2024-11-24T08:48:34,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:48:34,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:48:34,647 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/.tmp/hbase.id]:[hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/hbase.id] 2024-11-24T08:48:34,662 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:34,662 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:48:34,664 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-24T08:48:34,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:48:34,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:48:34,673 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:48:34,674 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:48:34,674 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:48:34,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:48:34,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:48:34,683 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store 2024-11-24T08:48:34,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:48:34,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:48:34,690 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:34,691 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:48:34,691 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:34,691 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:34,691 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:48:34,691 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:48:34,691 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:48:34,691 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438114691Disabling compacts and flushes for region at 1732438114691Disabling writes for close at 1732438114691Writing region close event to WAL at 1732438114691Closed at 1732438114691 2024-11-24T08:48:34,692 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/.initializing 2024-11-24T08:48:34,692 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554 2024-11-24T08:48:34,695 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C39181%2C1732438114554, suffix=, logDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554, archiveDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/oldWALs, maxLogs=10 2024-11-24T08:48:34,695 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C39181%2C1732438114554.1732438114695 2024-11-24T08:48:34,701 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 2024-11-24T08:48:34,702 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46357:46357),(127.0.0.1/127.0.0.1:35951:35951)] 2024-11-24T08:48:34,705 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:48:34,706 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:34,706 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,706 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:48:34,710 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:34,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:48:34,713 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:48:34,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:48:34,715 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:48:34,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:48:34,717 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:48:34,718 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,718 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,719 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,720 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,720 DEBUG [master/469387a2cdb6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,721 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:48:34,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:48:34,724 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:48:34,725 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758555, jitterRate=-0.035447970032691956}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:48:34,726 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732438114706Initializing all the Stores at 1732438114707 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438114707Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438114707Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438114707Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438114707Cleaning up temporary data from old regions at 1732438114720 (+13 ms)Region opened successfully at 1732438114726 (+6 ms) 2024-11-24T08:48:34,726 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:48:34,730 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@728eb43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:48:34,731 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:48:34,731 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:48:34,731 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:48:34,732 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:48:34,732 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:48:34,733 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:48:34,733 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:48:34,735 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:48:34,736 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:48:34,737 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:48:34,737 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:48:34,738 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:48:34,738 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:48:34,739 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:48:34,740 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:48:34,740 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:48:34,741 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:48:34,742 DEBUG 
[master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:48:34,744 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:48:34,744 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:48:34,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:34,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:48:34,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,746 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=469387a2cdb6,39181,1732438114554, sessionid=0x10070eb1ba40000, setting cluster-up flag (Was=false) 2024-11-24T08:48:34,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,750 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:48:34,751 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,39181,1732438114554 2024-11-24T08:48:34,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:34,756 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:48:34,757 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,39181,1732438114554 2024-11-24T08:48:34,759 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:48:34,761 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:34,761 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:48:34,761 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:48:34,762 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 469387a2cdb6,39181,1732438114554 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:48:34,763 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:34,763 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:34,763 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:34,764 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:48:34,764 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/469387a2cdb6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:48:34,764 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,764 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:48:34,764 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:48:34,765 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732438144765 2024-11-24T08:48:34,765 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:48:34,765 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:48:34,765 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:48:34,766 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:48:34,766 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:48:34,766 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:48:34,766 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,766 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:48:34,766 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:48:34,766 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:34,766 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:48:34,766 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:48:34,767 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:48:34,767 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:48:34,767 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438114767,5,FailOnTimeoutGroup] 2024-11-24T08:48:34,767 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438114767,5,FailOnTimeoutGroup] 2024-11-24T08:48:34,767 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,767 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:48:34,767 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,767 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,767 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,768 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:48:34,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:48:34,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:48:34,775 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:48:34,775 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77 2024-11-24T08:48:34,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:48:34,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:48:34,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:34,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:48:34,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:48:34,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:34,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:48:34,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:48:34,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:34,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:48:34,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:48:34,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:34,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:48:34,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:48:34,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:34,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:34,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:48:34,792 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740 2024-11-24T08:48:34,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740 2024-11-24T08:48:34,794 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:48:34,794 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:48:34,795 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T08:48:34,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:48:34,798 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:48:34,799 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845190, jitterRate=0.0747147798538208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:48:34,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732438114782Initializing all the Stores at 1732438114783 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438114783Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438114783Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438114783Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438114783Cleaning up temporary data from old regions at 1732438114794 (+11 ms)Region opened successfully at 1732438114800 (+6 ms) 2024-11-24T08:48:34,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:48:34,800 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:48:34,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:48:34,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:48:34,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:48:34,801 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:48:34,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438114800Disabling compacts and flushes for region at 1732438114800Disabling writes for close at 1732438114801 (+1 ms)Writing 
region close event to WAL at 1732438114801Closed at 1732438114801 2024-11-24T08:48:34,803 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:34,803 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:48:34,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:48:34,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:48:34,806 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:48:34,821 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(746): ClusterId : bdd9c671-19da-42cd-a35a-fec164405f40 2024-11-24T08:48:34,821 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:48:34,823 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:48:34,823 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:48:34,825 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:48:34,825 DEBUG [RS:0;469387a2cdb6:40901 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18963023, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:48:34,842 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;469387a2cdb6:40901 2024-11-24T08:48:34,843 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:48:34,843 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:48:34,843 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:48:34,844 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,39181,1732438114554 with port=40901, startcode=1732438114603 2024-11-24T08:48:34,844 DEBUG [RS:0;469387a2cdb6:40901 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:48:34,846 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37925, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:48:34,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39181 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,40901,1732438114603 2024-11-24T08:48:34,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39181 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,40901,1732438114603 2024-11-24T08:48:34,849 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77 2024-11-24T08:48:34,849 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36097 2024-11-24T08:48:34,849 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:48:34,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:48:34,851 DEBUG [RS:0;469387a2cdb6:40901 {}] zookeeper.ZKUtil(111): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/469387a2cdb6,40901,1732438114603 2024-11-24T08:48:34,851 WARN [RS:0;469387a2cdb6:40901 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:48:34,851 INFO [RS:0;469387a2cdb6:40901 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:48:34,852 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603 2024-11-24T08:48:34,852 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,40901,1732438114603] 2024-11-24T08:48:34,856 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:48:34,858 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:48:34,860 INFO [RS:0;469387a2cdb6:40901 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:48:34,860 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T08:48:34,861 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:48:34,862 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:48:34,862 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,862 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,862 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:48:34,863 DEBUG [RS:0;469387a2cdb6:40901 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:48:34,865 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:48:34,865 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,866 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,866 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,866 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,866 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,40901,1732438114603-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:48:34,885 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:48:34,885 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,40901,1732438114603-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,885 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,885 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.Replication(171): 469387a2cdb6,40901,1732438114603 started 2024-11-24T08:48:34,901 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:34,902 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,40901,1732438114603, RpcServer on 469387a2cdb6/172.17.0.2:40901, sessionid=0x10070eb1ba40001 2024-11-24T08:48:34,902 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:48:34,902 DEBUG [RS:0;469387a2cdb6:40901 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,40901,1732438114603 2024-11-24T08:48:34,902 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,40901,1732438114603' 2024-11-24T08:48:34,902 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:48:34,903 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:48:34,904 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:48:34,904 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:48:34,904 DEBUG [RS:0;469387a2cdb6:40901 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 469387a2cdb6,40901,1732438114603 2024-11-24T08:48:34,904 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,40901,1732438114603' 2024-11-24T08:48:34,904 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:48:34,904 DEBUG 
[RS:0;469387a2cdb6:40901 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:48:34,905 DEBUG [RS:0;469387a2cdb6:40901 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:48:34,905 INFO [RS:0;469387a2cdb6:40901 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:48:34,905 INFO [RS:0;469387a2cdb6:40901 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:48:34,956 WARN [469387a2cdb6:39181 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:48:35,007 INFO [RS:0;469387a2cdb6:40901 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C40901%2C1732438114603, suffix=, logDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603, archiveDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs, maxLogs=32 2024-11-24T08:48:35,008 INFO [RS:0;469387a2cdb6:40901 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.1732438115008 2024-11-24T08:48:35,015 INFO [RS:0;469387a2cdb6:40901 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 2024-11-24T08:48:35,052 DEBUG [RS:0;469387a2cdb6:40901 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46357:46357),(127.0.0.1/127.0.0.1:35951:35951)] 2024-11-24T08:48:35,207 DEBUG [469387a2cdb6:39181 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:48:35,208 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=469387a2cdb6,40901,1732438114603 2024-11-24T08:48:35,210 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,40901,1732438114603, state=OPENING 2024-11-24T08:48:35,212 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:48:35,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:35,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:48:35,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:35,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:35,216 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:48:35,217 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,40901,1732438114603}] 2024-11-24T08:48:35,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:48:35,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:48:35,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:48:35,371 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:48:35,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56919, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:48:35,377 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:48:35,377 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:48:35,379 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C40901%2C1732438114603.meta, suffix=.meta, logDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603, archiveDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs, maxLogs=32 2024-11-24T08:48:35,380 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta 2024-11-24T08:48:35,385 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta 2024-11-24T08:48:35,388 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46357:46357),(127.0.0.1/127.0.0.1:35951:35951)] 2024-11-24T08:48:35,392 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:48:35,393 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:48:35,393 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:48:35,393 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T08:48:35,393 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:48:35,393 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:35,393 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:48:35,393 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:48:35,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:48:35,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:48:35,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:35,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:35,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:48:35,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:48:35,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:35,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:35,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:48:35,399 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:48:35,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:35,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:35,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:48:35,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:48:35,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:35,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:48:35,401 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:48:35,402 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740 2024-11-24T08:48:35,403 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740 2024-11-24T08:48:35,404 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:48:35,404 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:48:35,404 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:48:35,406 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:48:35,406 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870889, jitterRate=0.1073928028345108}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:48:35,407 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:48:35,407 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732438115393Writing region info on filesystem at 1732438115393Initializing all the Stores at 1732438115394 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438115394Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1732438115395 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438115395Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438115395Cleaning up temporary data from old regions at 1732438115404 (+9 ms)Running coprocessor post-open hooks at 1732438115407 (+3 ms)Region opened successfully at 1732438115407 2024-11-24T08:48:35,408 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732438115371 2024-11-24T08:48:35,411 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:48:35,412 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:48:35,412 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,40901,1732438114603 2024-11-24T08:48:35,413 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,40901,1732438114603, state=OPEN 2024-11-24T08:48:35,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:48:35,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:48:35,415 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=469387a2cdb6,40901,1732438114603 2024-11-24T08:48:35,416 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:35,416 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:48:35,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:48:35,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,40901,1732438114603 in 200 msec 2024-11-24T08:48:35,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=2, resume processing ppid=1 2024-11-24T08:48:35,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-24T08:48:35,423 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:48:35,423 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:48:35,424 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:48:35,424 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,40901,1732438114603, seqNum=-1] 2024-11-24T08:48:35,425 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:48:35,426 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33051, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:48:35,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 672 msec 2024-11-24T08:48:35,433 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732438115433, completionTime=-1 2024-11-24T08:48:35,433 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:48:35,433 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732438175436 2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732438235436 2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39181,1732438114554-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39181,1732438114554-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39181,1732438114554-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:48:35,436 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-469387a2cdb6:39181, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,437 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,437 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,438 DEBUG [master/469387a2cdb6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.804sec 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39181,1732438114554-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:48:35,441 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39181,1732438114554-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:48:35,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64f66ec6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:48:35,443 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 469387a2cdb6,39181,-1 for getting cluster id 2024-11-24T08:48:35,443 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:48:35,444 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:48:35,444 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:48:35,444 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39181,1732438114554-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:48:35,445 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bdd9c671-19da-42cd-a35a-fec164405f40' 2024-11-24T08:48:35,446 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:48:35,446 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bdd9c671-19da-42cd-a35a-fec164405f40" 2024-11-24T08:48:35,446 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eea958a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:48:35,446 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [469387a2cdb6,39181,-1] 2024-11-24T08:48:35,446 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:48:35,447 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:48:35,448 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44938, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:48:35,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b1e030d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:48:35,449 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:48:35,450 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,40901,1732438114603, seqNum=-1] 2024-11-24T08:48:35,451 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:48:35,452 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:48:35,454 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=469387a2cdb6,39181,1732438114554 2024-11-24T08:48:35,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:35,457 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:48:35,472 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:48:35,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:35,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:35,472 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:48:35,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:48:35,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:48:35,473 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:48:35,473 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:48:35,473 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39681 2024-11-24T08:48:35,475 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39681 connecting to ZooKeeper ensemble=127.0.0.1:58471 2024-11-24T08:48:35,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:35,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:48:35,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396810x0, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:48:35,481 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-24T08:48:35,481 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39681-0x10070eb1ba40002 connected 2024-11-24T08:48:35,481 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-24T08:48:35,482 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:48:35,489 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:48:35,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:48:35,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:48:35,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39681 2024-11-24T08:48:35,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39681 2024-11-24T08:48:35,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39681 2024-11-24T08:48:35,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39681 2024-11-24T08:48:35,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39681 2024-11-24T08:48:35,499 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(746): ClusterId : bdd9c671-19da-42cd-a35a-fec164405f40 2024-11-24T08:48:35,499 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:48:35,500 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:48:35,501 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:48:35,502 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:48:35,503 DEBUG [RS:1;469387a2cdb6:39681 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6816cf5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:48:35,514 DEBUG [RS:1;469387a2cdb6:39681 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;469387a2cdb6:39681 2024-11-24T08:48:35,514 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:48:35,514 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:48:35,514 DEBUG [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:48:35,515 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,39181,1732438114554 with port=39681, startcode=1732438115472 2024-11-24T08:48:35,515 DEBUG [RS:1;469387a2cdb6:39681 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:48:35,517 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53349, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:48:35,518 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39181 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,39681,1732438115472 2024-11-24T08:48:35,518 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39181 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,39681,1732438115472 2024-11-24T08:48:35,520 DEBUG [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77 2024-11-24T08:48:35,520 DEBUG [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36097 2024-11-24T08:48:35,520 DEBUG [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:48:35,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:48:35,522 DEBUG [RS:1;469387a2cdb6:39681 {}] zookeeper.ZKUtil(111): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/469387a2cdb6,39681,1732438115472 2024-11-24T08:48:35,522 WARN [RS:1;469387a2cdb6:39681 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:48:35,522 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,39681,1732438115472] 2024-11-24T08:48:35,522 INFO [RS:1;469387a2cdb6:39681 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:48:35,522 DEBUG [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472 2024-11-24T08:48:35,526 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:48:35,528 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:48:35,528 INFO [RS:1;469387a2cdb6:39681 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:48:35,528 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T08:48:35,528 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:48:35,529 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:48:35,529 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,529 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:48:35,530 DEBUG [RS:1;469387a2cdb6:39681 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:48:35,533 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:48:35,533 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,533 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,534 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,534 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,534 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39681,1732438115472-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:48:35,548 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:48:35,548 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,39681,1732438115472-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,548 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,548 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.Replication(171): 469387a2cdb6,39681,1732438115472 started 2024-11-24T08:48:35,561 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:48:35,562 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,39681,1732438115472, RpcServer on 469387a2cdb6/172.17.0.2:39681, sessionid=0x10070eb1ba40002 2024-11-24T08:48:35,562 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:48:35,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;469387a2cdb6:39681,5,FailOnTimeoutGroup] 2024-11-24T08:48:35,562 DEBUG [RS:1;469387a2cdb6:39681 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,39681,1732438115472 2024-11-24T08:48:35,562 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,39681,1732438115472' 2024-11-24T08:48:35,562 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:48:35,562 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-24T08:48:35,563 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:48:35,563 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:48:35,563 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:48:35,563 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:48:35,563 DEBUG [RS:1;469387a2cdb6:39681 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
469387a2cdb6,39681,1732438115472 2024-11-24T08:48:35,563 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,39681,1732438115472' 2024-11-24T08:48:35,563 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:48:35,564 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:48:35,564 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 469387a2cdb6,39181,1732438114554 2024-11-24T08:48:35,564 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@74c93387 2024-11-24T08:48:35,564 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:48:35,564 DEBUG [RS:1;469387a2cdb6:39681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:48:35,564 INFO [RS:1;469387a2cdb6:39681 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:48:35,564 INFO [RS:1;469387a2cdb6:39681 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:48:35,566 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44946, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:48:35,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39181 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:48:35,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39181 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T08:48:35,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39181 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:48:35,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39181 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T08:48:35,570 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:48:35,570 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:35,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39181 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-24T08:48:35,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39181 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:48:35,571 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:48:35,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741835_1011 (size=393) 2024-11-24T08:48:35,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741835_1011 (size=393) 2024-11-24T08:48:35,580 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a20423784cc4152477ea73ae19a5d531, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77 2024-11-24T08:48:35,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741836_1012 (size=76) 2024-11-24T08:48:35,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39427 is added to blk_1073741836_1012 (size=76) 2024-11-24T08:48:35,588 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:35,588 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing a20423784cc4152477ea73ae19a5d531, disabling compactions & flushes 2024-11-24T08:48:35,588 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:35,588 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:35,588 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. after waiting 0 ms 2024-11-24T08:48:35,588 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:35,588 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:35,588 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for a20423784cc4152477ea73ae19a5d531: Waiting for close lock at 1732438115588Disabling compacts and flushes for region at 1732438115588Disabling writes for close at 1732438115588Writing region close event to WAL at 1732438115588Closed at 1732438115588 2024-11-24T08:48:35,590 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:48:35,591 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732438115590"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438115590"}]},"ts":"1732438115590"} 2024-11-24T08:48:35,594 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T08:48:35,595 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:48:35,595 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438115595"}]},"ts":"1732438115595"} 2024-11-24T08:48:35,598 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-24T08:48:35,598 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a20423784cc4152477ea73ae19a5d531, ASSIGN}] 2024-11-24T08:48:35,600 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a20423784cc4152477ea73ae19a5d531, ASSIGN 2024-11-24T08:48:35,601 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a20423784cc4152477ea73ae19a5d531, ASSIGN; state=OFFLINE, location=469387a2cdb6,40901,1732438114603; forceNewPlan=false, retain=false 2024-11-24T08:48:35,666 INFO [RS:1;469387a2cdb6:39681 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C39681%2C1732438115472, suffix=, logDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472, archiveDir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs, maxLogs=32 2024-11-24T08:48:35,667 INFO [RS:1;469387a2cdb6:39681 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C39681%2C1732438115472.1732438115667 2024-11-24T08:48:35,673 INFO [RS:1;469387a2cdb6:39681 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 2024-11-24T08:48:35,677 DEBUG [RS:1;469387a2cdb6:39681 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46357:46357),(127.0.0.1/127.0.0.1:35951:35951)] 2024-11-24T08:48:35,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:35,752 INFO [469387a2cdb6:39181 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-24T08:48:35,752 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a20423784cc4152477ea73ae19a5d531, regionState=OPENING, regionLocation=469387a2cdb6,40901,1732438114603 2024-11-24T08:48:35,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a20423784cc4152477ea73ae19a5d531, ASSIGN because future has completed 2024-11-24T08:48:35,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a20423784cc4152477ea73ae19a5d531, server=469387a2cdb6,40901,1732438114603}] 2024-11-24T08:48:35,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:35,919 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:35,919 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a20423784cc4152477ea73ae19a5d531, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:48:35,921 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,921 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:48:35,921 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,921 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,924 INFO [StoreOpener-a20423784cc4152477ea73ae19a5d531-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,926 INFO [StoreOpener-a20423784cc4152477ea73ae19a5d531-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a20423784cc4152477ea73ae19a5d531 columnFamilyName info 2024-11-24T08:48:35,926 DEBUG [StoreOpener-a20423784cc4152477ea73ae19a5d531-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:48:35,926 INFO [StoreOpener-a20423784cc4152477ea73ae19a5d531-1 {}] regionserver.HStore(327): Store=a20423784cc4152477ea73ae19a5d531/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:48:35,927 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,927 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,928 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,929 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,929 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,931 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,933 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:48:35,933 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a20423784cc4152477ea73ae19a5d531; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828185, jitterRate=0.053092196583747864}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:48:35,934 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:35,934 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a20423784cc4152477ea73ae19a5d531: Running coprocessor pre-open hook 
at 1732438115921Writing region info on filesystem at 1732438115921Initializing all the Stores at 1732438115923 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438115923Cleaning up temporary data from old regions at 1732438115929 (+6 ms)Running coprocessor post-open hooks at 1732438115934 (+5 ms)Region opened successfully at 1732438115934 2024-11-24T08:48:35,935 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531., pid=6, masterSystemTime=1732438115911 2024-11-24T08:48:35,938 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:35,938 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:35,939 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a20423784cc4152477ea73ae19a5d531, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,40901,1732438114603 2024-11-24T08:48:35,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a20423784cc4152477ea73ae19a5d531, server=469387a2cdb6,40901,1732438114603 because future has completed 2024-11-24T08:48:35,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:48:35,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a20423784cc4152477ea73ae19a5d531, server=469387a2cdb6,40901,1732438114603 in 188 msec 2024-11-24T08:48:35,952 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:48:35,952 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a20423784cc4152477ea73ae19a5d531, ASSIGN in 350 msec 2024-11-24T08:48:35,953 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:48:35,954 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438115953"}]},"ts":"1732438115953"} 2024-11-24T08:48:35,957 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-24T08:48:35,959 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:48:35,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 392 msec 2024-11-24T08:48:36,280 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:48:36,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:36,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:36,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:36,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:40,856 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:48:40,857 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-24T08:48:41,424 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:48:41,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:41,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:41,457 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:41,457 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:48:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:48:45,318 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:48:45,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T08:48:45,320 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-24T08:48:45,322 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:48:45,323 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:48:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39181 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:48:45,640 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-24T08:48:45,640 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-24T08:48:45,649 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T08:48:45,649 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:45,659 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:45,662 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:45,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:45,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:45,664 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:48:45,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@443823b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:45,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6357ab18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:45,756 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d5a0567{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir/jetty-localhost-36495-hadoop-hdfs-3_4_1-tests_jar-_-any-3992641219269681168/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:45,757 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@168c0f83{HTTP/1.1, (http/1.1)}{localhost:36495} 2024-11-24T08:48:45,757 INFO [Time-limited test {}] server.Server(415): Started @116374ms 2024-11-24T08:48:45,758 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:48:45,790 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:45,793 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:45,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:45,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:45,796 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:48:45,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3049786d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:45,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@412f6c3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:45,816 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data6/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:45,816 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data5/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:45,836 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:48:45,839 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1081c6c3319e37c6 with lease ID 0x93b51bcf413bec7b: Processing first storage report for DS-eb4ea890-d79e-435c-9739-4e0565995c05 from datanode DatanodeRegistration(127.0.0.1:34413, datanodeUuid=6e0838c4-cf76-46d3-9f69-92550c40108c, infoPort=45047, infoSecurePort=0, ipcPort=36647, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:45,839 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1081c6c3319e37c6 with lease ID 0x93b51bcf413bec7b: from storage DS-eb4ea890-d79e-435c-9739-4e0565995c05 node DatanodeRegistration(127.0.0.1:34413, datanodeUuid=6e0838c4-cf76-46d3-9f69-92550c40108c, infoPort=45047, infoSecurePort=0, ipcPort=36647, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:48:45,839 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1081c6c3319e37c6 with lease ID 0x93b51bcf413bec7b: Processing first storage report for DS-d2ed8ec1-2ff1-47bc-8ca8-0f1ef5694b71 from datanode DatanodeRegistration(127.0.0.1:34413, datanodeUuid=6e0838c4-cf76-46d3-9f69-92550c40108c, infoPort=45047, infoSecurePort=0, ipcPort=36647, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:45,839 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1081c6c3319e37c6 with lease ID 0x93b51bcf413bec7b: from storage DS-d2ed8ec1-2ff1-47bc-8ca8-0f1ef5694b71 node DatanodeRegistration(127.0.0.1:34413, datanodeUuid=6e0838c4-cf76-46d3-9f69-92550c40108c, infoPort=45047, infoSecurePort=0, ipcPort=36647, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:45,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7991aaa4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir/jetty-localhost-36189-hadoop-hdfs-3_4_1-tests_jar-_-any-7011391849502259827/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:45,893 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4eebb985{HTTP/1.1, (http/1.1)}{localhost:36189} 2024-11-24T08:48:45,893 INFO [Time-limited test {}] server.Server(415): Started @116509ms 2024-11-24T08:48:45,894 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:48:45,933 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:45,938 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:45,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:45,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:45,939 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:48:45,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d11ae98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:45,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70095cb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:45,959 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data8/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:45,959 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data7/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:45,980 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:48:45,982 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa998871789dc1545 with lease ID 0x93b51bcf413bec7c: Processing first storage report for DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3 from datanode DatanodeRegistration(127.0.0.1:33305, datanodeUuid=0529ac69-c714-4f7a-9d57-9dc2bedeb881, infoPort=44455, infoSecurePort=0, ipcPort=36165, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:45,982 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa998871789dc1545 with lease ID 0x93b51bcf413bec7c: from storage DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3 node DatanodeRegistration(127.0.0.1:33305, datanodeUuid=0529ac69-c714-4f7a-9d57-9dc2bedeb881, infoPort=44455, infoSecurePort=0, ipcPort=36165, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:45,982 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa998871789dc1545 with lease ID 0x93b51bcf413bec7c: Processing first storage report for DS-3286ae47-fa1f-4db0-aa09-d0402c68a739 from datanode DatanodeRegistration(127.0.0.1:33305, datanodeUuid=0529ac69-c714-4f7a-9d57-9dc2bedeb881, infoPort=44455, infoSecurePort=0, ipcPort=36165, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:45,982 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa998871789dc1545 with lease ID 0x93b51bcf413bec7c: from storage DS-3286ae47-fa1f-4db0-aa09-d0402c68a739 node DatanodeRegistration(127.0.0.1:33305, datanodeUuid=0529ac69-c714-4f7a-9d57-9dc2bedeb881, infoPort=44455, infoSecurePort=0, ipcPort=36165, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:46,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26f4ea5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir/jetty-localhost-36575-hadoop-hdfs-3_4_1-tests_jar-_-any-16979518863599760476/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:46,039 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61ab06b3{HTTP/1.1, (http/1.1)}{localhost:36575} 2024-11-24T08:48:46,039 INFO [Time-limited test {}] server.Server(415): Started @116656ms 2024-11-24T08:48:46,040 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:48:46,096 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:46,097 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10/current/BP-1677742332-172.17.0.2-1732438113484/current, will proceed with Du for space computation calculation, 2024-11-24T08:48:46,113 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:48:46,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfeffe464896eeff8 with lease ID 0x93b51bcf413bec7d: Processing first storage report for DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6 from datanode DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfeffe464896eeff8 with lease ID 0x93b51bcf413bec7d: from storage DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6 node DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfeffe464896eeff8 with lease ID 0x93b51bcf413bec7d: Processing first storage report for DS-dc2ae7ba-7b1b-4186-8228-309afca8bf52 from datanode DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484) 2024-11-24T08:48:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfeffe464896eeff8 with lease ID 0x93b51bcf413bec7d: from storage DS-dc2ae7ba-7b1b-4186-8228-309afca8bf52 node DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:46,159 WARN [ResponseProcessor for block BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:48:46,159 WARN [ResponseProcessor for block BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,160 WARN [ResponseProcessor for block BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,159 WARN [ResponseProcessor for block BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,161 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:46,161 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 
2024-11-24T08:48:46,161 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta block BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:46,161 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:46,161 WARN [PacketResponder: BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39427] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,161 WARN [PacketResponder: BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39427] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,161 WARN [PacketResponder: BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39427] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:48:46,161 WARN [PacketResponder: BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39427] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:56558 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37885:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56558 dst: /127.0.0.1:37885 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:48:46,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:56598 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37885:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56598 dst: /127.0.0.1:37885 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_76628491_22 at /127.0.0.1:56638 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37885:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56638 dst: /127.0.0.1:37885 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:48:46,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@fbfcb3a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:46,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:36680 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39427:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36680 dst: /127.0.0.1:39427 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:56606 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37885:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56606 dst: /127.0.0.1:37885 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,165 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41d66623{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:46,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:36692 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39427:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36692 dst: /127.0.0.1:39427 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,165 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:46,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_76628491_22 at /127.0.0.1:36704 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39427:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36704 dst: /127.0.0.1:39427 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:36664 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39427:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36664 dst: /127.0.0.1:39427 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:46,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c52e06b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:46,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30312faa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:46,167 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:46,167 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:48:46,167 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677742332-172.17.0.2-1732438113484 (Datanode Uuid dfe95277-a083-409b-81fc-9930f31c832c) service to localhost/127.0.0.1:36097 2024-11-24T08:48:46,167 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:46,168 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data3/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:46,168 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data4/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:46,168 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:46,169 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,169 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,170 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,169 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta block BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6973f479{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:46,172 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@289e7e24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:46,172 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:46,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@284f089c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:46,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@500184cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:46,174 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:48:46,174 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:46,174 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:46,174 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677742332-172.17.0.2-1732438113484 (Datanode Uuid 5a12bab1-d62c-4022-a015-3366aacc7760) service to localhost/127.0.0.1:36097 2024-11-24T08:48:46,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data1/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:46,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data2/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:46,175 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:46,178 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531., hostname=469387a2cdb6,40901,1732438114603, seqNum=2] 2024-11-24T08:48:46,180 ERROR [FSHLog-0-hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77-prefix:469387a2cdb6,40901,1732438114603 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,180 WARN [FSHLog-0-hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77-prefix:469387a2cdb6,40901,1732438114603 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,180 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,180 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C40901%2C1732438114603:(num 1732438115008) roll requested 2024-11-24T08:48:46,180 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.1732438126180 2024-11-24T08:48:46,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:46,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:46,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:46,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:46,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:46,186 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438126180 2024-11-24T08:48:46,187 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,187 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:46,188 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-24T08:48:46,188 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-24T08:48:46,188 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 2024-11-24T08:48:46,189 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44455:44455),(127.0.0.1/127.0.0.1:45047:45047)] 2024-11-24T08:48:46,189 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 is not closed yet, will try archiving it next time 2024-11-24T08:48:46,191 WARN [IPC Server handler 3 on default port 36097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-24T08:48:46,194 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 after 4ms 2024-11-24T08:48:46,676 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:47,533 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:48,189 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:48,191 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438126180 2024-11-24T08:48:48,192 WARN [ResponseProcessor for block BP-1677742332-172.17.0.2-1732438113484:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677742332-172.17.0.2-1732438113484:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:48,193 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438126180 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:48:48,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:49756 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49756 dst: /127.0.0.1:33305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:48,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:47404 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34413:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47404 dst: /127.0.0.1:34413 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:48,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7991aaa4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:48,199 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4eebb985{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:48,199 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:48,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@412f6c3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:48,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3049786d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:48,201 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:48,201 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:48:48,201 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677742332-172.17.0.2-1732438113484 (Datanode Uuid 0529ac69-c714-4f7a-9d57-9dc2bedeb881) service to localhost/127.0.0.1:36097 2024-11-24T08:48:48,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:48,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data7/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:48,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data8/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:48,202 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:48,677 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:49,534 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:50,191 WARN [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]] 2024-11-24T08:48:50,191 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:50,191 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C40901%2C1732438114603:(num 1732438126180) roll requested 2024-11-24T08:48:50,191 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.1732438130191 2024-11-24T08:48:50,195 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 after 4007ms 2024-11-24T08:48:50,196 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37885 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:50,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:60964 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data6]'}, localName='127.0.0.1:34413', datanodeUuid='6e0838c4-cf76-46d3-9f69-92550c40108c', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741839_1021 to mirror 127.0.0.1:37885 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:50,196 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 2024-11-24T08:48:50,196 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741839_1021 2024-11-24T08:48:50,196 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:60964 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T08:48:50,196 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:60964 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34413:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60964 dst: /127.0.0.1:34413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:50,199 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:50,203 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39427 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:50,203 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52924 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741840_1022 to mirror 127.0.0.1:39427 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:50,203 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:50,203 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741840_1022 2024-11-24T08:48:50,203 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52924 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T08:48:50,204 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52924 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52924 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:50,204 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:50,206 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:50,206 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:48:50,206 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741841_1023 2024-11-24T08:48:50,207 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:50,207 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:48:50,212 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:50,212 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:50,213 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:50,213 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:50,213 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:50,213 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438126180 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438130191 2024-11-24T08:48:50,214 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:45047:45047)] 2024-11-24T08:48:50,214 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 is not closed yet, will try archiving it next time 2024-11-24T08:48:50,214 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438126180 is not closed yet, will try archiving it next time 2024-11-24T08:48:50,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741838_1020 (size=3600) 2024-11-24T08:48:50,616 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 is not closed yet, will try archiving it next time 2024-11-24T08:48:50,678 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:51,534 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:51,856 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@61cde5f3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34413, datanodeUuid=6e0838c4-cf76-46d3-9f69-92550c40108c, infoPort=45047, infoSecurePort=0, ipcPort=36647, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741838_1020 to 127.0.0.1:33305 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,210 WARN [ResponseProcessor for block BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
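The RecoverLeaseFSUtils records earlier in this stretch (the "Recover lease on dfs file ..." entry at 08:48:46,188, then "Failed to recover lease, attempt=0 ... after 4ms" and "attempt=1 ... after 4007ms") are HBase asking the NameNode to reclaim the write lease on the abandoned WAL before it can be safely read. The log itself notes the call goes through org.apache.hadoop.fs.LeaseRecoverable.recoverLease(). A minimal sketch of that call pattern, assuming a DistributedFileSystem handle and a hypothetical file path; the fixed 1-second retry and 60-second cap are illustrative, not the adaptive schedule HBase uses:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

  /**
   * Ask the NameNode to recover the lease on a file that was left open by a
   * dead writer, retrying until recovery completes or the deadline passes.
   * recoverLease() returns true once the file is closed and safe to read.
   */
  static boolean recoverLease(DistributedFileSystem dfs, Path file, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(file)) {
        return true;                      // lease released, block length finalized
      }
      System.out.println("lease not yet recovered, attempt=" + attempt++);
      Thread.sleep(1000L);                // illustrative back-off only
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical path, standing in for the WAL file named in the log above.
    Path file = new Path("hdfs://localhost:36097/user/jenkins/test-data/example-wal");
    try (FileSystem fs = FileSystem.get(file.toUri(), conf)) {
      if (fs instanceof DistributedFileSystem) {
        boolean ok = recoverLease((DistributedFileSystem) fs, file, 60_000L);
        System.out.println("recovered=" + ok);
      }
    }
  }
}
```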
2024-11-24T08:48:52,211 WARN [DataStreamer for file /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438130191 block BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:48:52,211 WARN [PacketResponder: BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34413] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52938 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52938 dst: /127.0.0.1:32883 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,212 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:60970 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34413:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60970 dst: /127.0.0.1:34413 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:48:52,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d5a0567{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:52,213 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@168c0f83{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:48:52,213 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:48:52,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6357ab18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:48:52,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@443823b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,STOPPED} 2024-11-24T08:48:52,214 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:48:52,214 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:48:52,214 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:48:52,214 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677742332-172.17.0.2-1732438113484 (Datanode Uuid 6e0838c4-cf76-46d3-9f69-92550c40108c) service to localhost/127.0.0.1:36097 2024-11-24T08:48:52,214 WARN [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]] 2024-11-24T08:48:52,214 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
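The "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL." warnings at 08:48:50,191 and 08:48:52,214 come from the WAL periodically comparing the live pipeline of its output stream against the expected replication and requesting the roll that follows. A rough sketch of such a check, assuming the underlying stream is an org.apache.hadoop.hdfs.client.HdfsDataOutputStream; the method name getCurrentBlockReplication() and the threshold handling are recalled from memory and should be treated as assumptions, not a quote of FSHLog:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class LowReplicationCheckSketch {

  /**
   * Return true if the writer's current block is hosted on fewer datanodes
   * than we tolerate, i.e. the WAL should be rolled onto a fresh block with a
   * healthy pipeline.
   */
  static boolean shouldRequestRoll(FSDataOutputStream out, int minTolerableReplication)
      throws IOException {
    if (!(out instanceof HdfsDataOutputStream)) {
      return false; // non-HDFS stream: no pipeline to inspect
    }
    int liveReplicas = ((HdfsDataOutputStream) out).getCurrentBlockReplication();
    if (liveReplicas < minTolerableReplication) {
      System.out.printf("Found %d replicas but expecting no less than %d replicas; "
          + "requesting WAL roll%n", liveReplicas, minTolerableReplication);
      return true;
    }
    return false;
  }
}
```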
2024-11-24T08:48:52,214 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C40901%2C1732438114603:(num 1732438130191) roll requested 2024-11-24T08:48:52,214 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data5/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:52,214 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data6/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:48:52,215 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:48:52,215 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.1732438132214 2024-11-24T08:48:52,218 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,218 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 2024-11-24T08:48:52,218 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741843_1026 2024-11-24T08:48:52,218 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:52,220 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,220 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:52,220 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741844_1027 2024-11-24T08:48:52,221 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:52,222 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,222 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:48:52,222 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741845_1028 2024-11-24T08:48:52,223 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:48:52,224 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,224 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:48:52,224 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741846_1029 2024-11-24T08:48:52,225 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:52,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40901 {}] regionserver.HRegion(8855): Flush requested on a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:52,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a20423784cc4152477ea73ae19a5d531 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:48:52,227 WARN [IPC Server handler 2 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:48:52,227 WARN [IPC Server handler 2 on default port 36097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:48:52,227 WARN [IPC Server handler 2 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:48:52,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:52,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:52,234 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:52,234 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:52,234 INFO [sync.4 
{}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:52,235 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438130191 with entries=8, filesize=8.39 KB; new WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438132214 2024-11-24T08:48:52,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741842_1025 (size=8599) 2024-11-24T08:48:52,242 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687)] 2024-11-24T08:48:52,242 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 is not closed yet, will try archiving it next time 2024-11-24T08:48:52,242 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438130191 is not closed yet, will try archiving it next time 2024-11-24T08:48:52,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/d404d00f1cdc4f0b826a87c139e26f86 is 1080, key is row0002/info:/1732438128203/Put/seqid=0 2024-11-24T08:48:52,253 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,253 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 
2024-11-24T08:48:52,253 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741848_1031 2024-11-24T08:48:52,254 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:52,256 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33305 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52946 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741849_1032 to mirror 127.0.0.1:33305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,256 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 
2024-11-24T08:48:52,256 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741849_1032 2024-11-24T08:48:52,256 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52946 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:48:52,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52946 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52946 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,257 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:52,262 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,262 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 
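[Editor's note] The repeated "Error Recovery ... datanode N (...) is bad" / "Abandoning ..." / "Excluding datanode ..." lines above come from the HDFS client's DataStreamer rebuilding its write pipeline after a datanode stops answering. As a hedged illustration only (not something this test sets), the client-side behaviour in such situations is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the values below are example choices, not values read from this run.

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoverySettings {
        // Sketch: a client-side Configuration tuned for datanode replacement during writes.
        public static Configuration clientConf() {
            Configuration conf = new Configuration();
            // Allow the DFS client to try replacing a datanode the pipeline has marked as bad.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            // DEFAULT replaces only for larger pipelines; ALWAYS and NEVER are the alternatives.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // If no replacement datanode is available, keep writing with the survivors
            // rather than failing the write (illustrative choice).
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }

With only two live replicas, as in this test cluster, the client mostly just abandons the partially created block and asks the NameNode for a new one while excluding the bad node, which is exactly the pattern logged above.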
2024-11-24T08:48:52,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52948 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741850_1033 to mirror 127.0.0.1:34413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,262 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741850_1033 2024-11-24T08:48:52,262 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52948 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:48:52,263 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52948 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52948 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,263 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:48:52,264 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,265 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 
2024-11-24T08:48:52,265 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741851_1034 2024-11-24T08:48:52,265 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:52,266 WARN [IPC Server handler 2 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:48:52,266 WARN [IPC Server handler 2 on default port 36097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:48:52,267 WARN [IPC Server handler 2 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:48:52,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741852_1035 (size=10347) 2024-11-24T08:48:52,638 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 is not closed yet, will try archiving it next time 2024-11-24T08:48:52,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/d404d00f1cdc4f0b826a87c139e26f86 2024-11-24T08:48:52,678 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
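[Editor's note] The BlockPlacementPolicyDefault warnings above explicitly suggest enabling DEBUG logging on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology to see why candidate datanodes were rejected. A minimal sketch of doing that programmatically with Log4j 2's Configurator follows; an equivalent entry in the log4j2 configuration file would work just as well, and this snippet is illustrative rather than part of the test.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class EnablePlacementDebug {
        public static void main(String[] args) {
            // Raise the two loggers named in the NameNode warning to DEBUG so the
            // placement policy reports why each candidate datanode was skipped.
            Configurator.setLevel("org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }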
2024-11-24T08:48:52,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/d404d00f1cdc4f0b826a87c139e26f86 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/d404d00f1cdc4f0b826a87c139e26f86 2024-11-24T08:48:52,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/d404d00f1cdc4f0b826a87c139e26f86, entries=5, sequenceid=11, filesize=10.1 K 2024-11-24T08:48:52,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for a20423784cc4152477ea73ae19a5d531 in 466ms, sequenceid=11, compaction requested=false 2024-11-24T08:48:52,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:48:52,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40901 {}] regionserver.HRegion(8855): Flush requested on a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:52,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a20423784cc4152477ea73ae19a5d531 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-24T08:48:52,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/b3bb6db5bec949eebd2e12e1593340a0 is 1080, key is row0007/info:/1732438132227/Put/seqid=0 2024-11-24T08:48:52,879 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
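[Editor's note] Each "Flush requested on ..." / "Flushing ... 1/1 column families" pair above is the MemStoreFlusher writing the region's in-memory edits to a temporary HFile under .tmp and then committing it into the info family. The flushes here are only a few KB because the test drives them directly; in a normal deployment the trigger is the per-region memstore flush size. The sketch below shows the usual knobs with their common defaults, stated as assumptions rather than values read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class FlushTuning {
        // Sketch: configuration controlling when a region's memstore is flushed.
        public static Configuration withFlushSettings() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes (128 MB here).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Block further updates if the memstore grows past flush.size * this multiplier.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }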
2024-11-24T08:48:52,879 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52976 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741853_1036 to mirror 127.0.0.1:34413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,880 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:48:52,880 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741853_1036 2024-11-24T08:48:52,880 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52976 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:48:52,880 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52976 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52976 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:52,881 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:48:52,882 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,882 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 2024-11-24T08:48:52,882 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741854_1037 2024-11-24T08:48:52,883 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:52,884 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,884 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:52,884 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741855_1038 2024-11-24T08:48:52,885 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:52,886 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:52,886 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 
2024-11-24T08:48:52,886 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741856_1039 2024-11-24T08:48:52,887 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:52,888 WARN [IPC Server handler 4 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:48:52,888 WARN [IPC Server handler 4 on default port 36097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:48:52,888 WARN [IPC Server handler 4 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:48:52,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741857_1040 (size=12506) 2024-11-24T08:48:53,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/b3bb6db5bec949eebd2e12e1593340a0 2024-11-24T08:48:53,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/b3bb6db5bec949eebd2e12e1593340a0 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0 2024-11-24T08:48:53,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0, entries=7, sequenceid=24, filesize=12.2 K 2024-11-24T08:48:53,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for a20423784cc4152477ea73ae19a5d531 in 442ms, sequenceid=24, compaction requested=false 2024-11-24T08:48:53,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:48:53,311 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-24T08:48:53,311 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:53,311 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0 because midkey is the same as first or last row 2024-11-24T08:48:53,535 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,243 WARN [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]] 2024-11-24T08:48:54,243 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,243 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C40901%2C1732438114603:(num 1732438132214) roll requested 2024-11-24T08:48:54,243 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.1732438134243 2024-11-24T08:48:54,247 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37885 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52996 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741858_1041 to mirror 127.0.0.1:37885 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:54,247 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 2024-11-24T08:48:54,247 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52996 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
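[Editor's note] The roll sequence above begins when FSHLog detects that the WAL's HDFS pipeline has dropped to a single replica ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") and asks the log roller for a new file. As a hedged sketch only: the check is commonly tuned with the settings below; treat the exact key names and values as assumptions for illustration, since they are not shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalLowReplicationTuning {
        // Sketch: assumed configuration keys governing WAL rolls on low replication.
        public static Configuration withWalRollSettings() {
            Configuration conf = HBaseConfiguration.create();
            // Request a roll when the WAL pipeline has fewer replicas than this (assumed key/value).
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Stop requesting further rolls after this many consecutive low-replication rolls (assumed).
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            return conf;
        }
    }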
2024-11-24T08:48:54,247 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741858_1041 2024-11-24T08:48:54,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52996 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52996 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:54,248 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:54,249 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,250 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 
2024-11-24T08:48:54,250 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741859_1042 2024-11-24T08:48:54,250 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:54,252 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,252 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:48:54,252 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741860_1043 2024-11-24T08:48:54,253 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:48:54,256 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33305 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:48:54,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:53004 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741861_1044 to mirror 127.0.0.1:33305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:54,256 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:48:54,256 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741861_1044 2024-11-24T08:48:54,256 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:53004 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T08:48:54,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:53004 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53004 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:54,257 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:54,257 WARN [IPC Server handler 1 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:48:54,257 WARN [IPC Server handler 1 on default port 36097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:48:54,258 WARN [IPC Server handler 1 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:48:54,260 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:54,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:54,260 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:54,260 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:54,260 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:48:54,261 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438132214 with entries=16, filesize=15.93 KB; new WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438134243 2024-11-24T08:48:54,261 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687)] 2024-11-24T08:48:54,261 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 is not closed yet, will try archiving it next time 
2024-11-24T08:48:54,261 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438132214 is not closed yet, will try archiving it next time 2024-11-24T08:48:54,262 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438126180 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs/469387a2cdb6%2C40901%2C1732438114603.1732438126180 2024-11-24T08:48:54,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741847_1030 (size=16317) 2024-11-24T08:48:54,264 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438130191 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs/469387a2cdb6%2C40901%2C1732438114603.1732438130191 2024-11-24T08:48:54,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40901 {}] regionserver.HRegion(8855): Flush requested on a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:54,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a20423784cc4152477ea73ae19a5d531 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:48:54,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/db3a66456e68492b85854d5a7ec27d5e is 1079, key is tmprow/info:/1732438134305/Put/seqid=0 2024-11-24T08:48:54,318 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,318 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 
2024-11-24T08:48:54,318 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741863_1046 2024-11-24T08:48:54,319 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:54,320 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,320 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:54,320 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741864_1047 2024-11-24T08:48:54,321 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:54,322 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:48:54,322 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:48:54,322 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741865_1048 2024-11-24T08:48:54,323 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:54,324 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,324 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 
2024-11-24T08:48:54,324 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741866_1049 2024-11-24T08:48:54,325 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:48:54,325 WARN [IPC Server handler 3 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:48:54,325 WARN [IPC Server handler 3 on default port 36097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:48:54,325 WARN [IPC Server handler 3 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:48:54,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741867_1050 (size=6027) 2024-11-24T08:48:54,665 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 is not closed yet, will try archiving it next time 2024-11-24T08:48:54,679 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
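[Editor's note] The repeated "Exception in createBlockOutputStream ... Connection refused", "Abandoning blk_..." and "Excluding datanode ..." entries above come from the HDFS client's DataStreamer cycling through pipeline candidates after the test has killed datanodes; once every candidate is excluded, writers fail with "All datanodes [...] are bad. Aborting...". How aggressively a client replaces failed datanodes in an open pipeline is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings. The sketch below only illustrates setting those real HDFS keys on a client Configuration; it is not taken from this test's setup, and the chosen values and file path are assumptions for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineFailureConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Allow the client to replace failed datanodes in an open write pipeline.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces only when the pipeline would drop below a safe size;
        // with too few live datanodes the client can still end up with the
        // "All datanodes ... are bad" abort seen above. (Illustrative value.)
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Best-effort: keep writing even if no replacement datanode can be found.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        // Hypothetical write against the mini-cluster NameNode port from the log.
        try (FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:36097"), conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"))) {
            out.writeBytes("pipeline failure handling demo\n");
        }
    }
}
```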
2024-11-24T08:48:54,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/db3a66456e68492b85854d5a7ec27d5e 2024-11-24T08:48:54,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/db3a66456e68492b85854d5a7ec27d5e as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/db3a66456e68492b85854d5a7ec27d5e 2024-11-24T08:48:54,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/db3a66456e68492b85854d5a7ec27d5e, entries=1, sequenceid=34, filesize=5.9 K 2024-11-24T08:48:54,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a20423784cc4152477ea73ae19a5d531 in 444ms, sequenceid=34, compaction requested=true 2024-11-24T08:48:54,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:48:54,751 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-24T08:48:54,751 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:54,751 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0 because midkey is the same as first or last row 2024-11-24T08:48:54,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a20423784cc4152477ea73ae19a5d531:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:48:54,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:48:54,752 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:48:54,753 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:48:54,753 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HStore(1541): a20423784cc4152477ea73ae19a5d531/info is initiating minor compaction (all files) 2024-11-24T08:48:54,753 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
a20423784cc4152477ea73ae19a5d531/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:48:54,753 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/d404d00f1cdc4f0b826a87c139e26f86, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/db3a66456e68492b85854d5a7ec27d5e] into tmpdir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp, totalSize=28.2 K 2024-11-24T08:48:54,754 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.Compactor(225): Compacting d404d00f1cdc4f0b826a87c139e26f86, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732438128203 2024-11-24T08:48:54,754 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.Compactor(225): Compacting b3bb6db5bec949eebd2e12e1593340a0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732438132227 2024-11-24T08:48:54,755 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.Compactor(225): Compacting db3a66456e68492b85854d5a7ec27d5e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732438134305 2024-11-24T08:48:54,767 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a20423784cc4152477ea73ae19a5d531#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:48:54,767 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/ed17b39e368949b38eb1e9deabfa6554 is 1080, key is row0002/info:/1732438128203/Put/seqid=0 2024-11-24T08:48:54,769 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,769 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:54,769 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741868_1051 2024-11-24T08:48:54,770 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:54,771 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,771 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 2024-11-24T08:48:54,771 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741869_1052 2024-11-24T08:48:54,772 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:54,773 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,773 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:48:54,774 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741870_1053 2024-11-24T08:48:54,774 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:54,776 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:54,776 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:53058 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741871_1054 to mirror 127.0.0.1:34413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:54,777 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:48:54,777 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741871_1054 2024-11-24T08:48:54,777 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:53058 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:48:54,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:53058 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53058 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:48:54,777 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:48:54,778 WARN [IPC Server handler 0 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:48:54,778 WARN [IPC Server handler 0 on default port 36097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:48:54,778 WARN [IPC Server handler 0 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:48:54,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741872_1055 (size=17994) 2024-11-24T08:48:55,119 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1a9c7cc3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741842_1025 to 127.0.0.1:34413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:48:55,119 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1ea3eb48[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741852_1035 to 127.0.0.1:39427 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:55,192 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/ed17b39e368949b38eb1e9deabfa6554 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 2024-11-24T08:48:55,201 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a20423784cc4152477ea73ae19a5d531/info of a20423784cc4152477ea73ae19a5d531 into ed17b39e368949b38eb1e9deabfa6554(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
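[Editor's note] The split-policy DEBUG entries around this compaction (e.g. "Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K" immediately followed by "cannot split ... because midkey is the same as first or last row") amount to a two-step check: compare the store's summed file size against a threshold, then only treat the region as splittable if a usable midkey exists. The following is an illustrative reconstruction of that decision using numbers and row keys taken from the log; it is not HBase's actual implementation.

```java
import java.util.Arrays;

// Illustrative sketch of the split decision logged above (not HBase source).
public class SplitCheckSketch {
    /** True when the summed store file size exceeds the configured check size. */
    static boolean sizeSaysSplit(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;
    }

    /** A split point is only usable if the midkey differs from the first and last row. */
    static boolean midkeyUsable(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 17 * 1024 + 614;   // ~17.6 K, as in the log (approximate)
        long sizeToCheck = 16 * 1024;     // 16.0 K, as in the log
        byte[] midkey = "row0002".getBytes();   // row key appearing in the log
        byte[] firstRow = "row0002".getBytes(); // same as midkey -> cannot split
        byte[] lastRow = "tmprow".getBytes();

        boolean sizeOk = sizeSaysSplit(sumSize, sizeToCheck);
        boolean wouldSplit = sizeOk && midkeyUsable(midkey, firstRow, lastRow);
        System.out.println("should split by size: " + sizeOk);       // true
        System.out.println("would actually split: " + wouldSplit);   // false, as in the log
    }
}
```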
2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:48:55,202 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531., storeName=a20423784cc4152477ea73ae19a5d531/info, priority=13, startTime=1732438134752; duration=0sec 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 because midkey is the same as first or last row 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 because midkey is the same as first or last row 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 because midkey is the same as first or last row 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:48:55,202 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a20423784cc4152477ea73ae19a5d531:info 2024-11-24T08:48:55,535 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:55,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40901 {}] regionserver.HRegion(8855): Flush requested on a20423784cc4152477ea73ae19a5d531 2024-11-24T08:48:55,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a20423784cc4152477ea73ae19a5d531 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:48:55,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/4e64305f84ba496e9b5ead31d0d5d5ae is 1079, key is tmprow/info:/1732438135740/Put/seqid=0 2024-11-24T08:48:55,753 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:55,753 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]) is bad. 2024-11-24T08:48:55,753 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741873_1056 2024-11-24T08:48:55,753 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK] 2024-11-24T08:48:55,754 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:55,754 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK], DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:48:55,754 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741874_1057 2024-11-24T08:48:55,755 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:48:55,756 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:55,756 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:48:55,756 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741875_1058 2024-11-24T08:48:55,757 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:48:55,758 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:55,758 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]) is bad. 2024-11-24T08:48:55,758 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741876_1059 2024-11-24T08:48:55,759 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39427,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK] 2024-11-24T08:48:55,759 WARN [IPC Server handler 4 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:48:55,759 WARN [IPC Server handler 4 on default port 36097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:48:55,759 WARN [IPC Server handler 4 on default port 36097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:48:55,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741877_1060 (size=6027) 2024-11-24T08:48:56,118 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1a9c7cc3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741847_1030 to 127.0.0.1:37885 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:56,118 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1ea3eb48[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741857_1040 to 127.0.0.1:33305 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:48:56,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/4e64305f84ba496e9b5ead31d0d5d5ae 2024-11-24T08:48:56,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/4e64305f84ba496e9b5ead31d0d5d5ae as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/4e64305f84ba496e9b5ead31d0d5d5ae 2024-11-24T08:48:56,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/4e64305f84ba496e9b5ead31d0d5d5ae, entries=1, sequenceid=45, filesize=5.9 K 2024-11-24T08:48:56,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a20423784cc4152477ea73ae19a5d531 in 442ms, sequenceid=45, compaction requested=false 2024-11-24T08:48:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:48:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-24T08:48:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:48:56,185 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 because midkey is the same as first or last row 2024-11-24T08:48:56,262 WARN [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-24T08:48:56,263 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:56,365 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:48:56,368 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:48:56,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:48:56,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:48:56,369 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:48:56,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b392de9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:48:56,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55dafd43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:48:56,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a60e80d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/java.io.tmpdir/jetty-localhost-33481-hadoop-hdfs-3_4_1-tests_jar-_-any-2128036507368702998/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:48:56,463 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a864bf5{HTTP/1.1, (http/1.1)}{localhost:33481} 2024-11-24T08:48:56,463 INFO [Time-limited test {}] server.Server(415): Started @127080ms 2024-11-24T08:48:56,464 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:48:56,540 WARN [Thread-976 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:48:56,547 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20ebf6645d97d9f3 with lease ID 0x93b51bcf413bec7e: from storage DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe node DatanodeRegistration(127.0.0.1:33675, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=45561, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:56,547 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20ebf6645d97d9f3 with lease ID 0x93b51bcf413bec7e: from storage DS-112cf404-b46d-44f1-b1e2-4efb69f5869e node DatanodeRegistration(127.0.0.1:33675, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=45561, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:48:56,679 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:57,536 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:58,117 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1a9c7cc3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32883, datanodeUuid=ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7, infoPort=37687, infoSecurePort=0, ipcPort=41447, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741867_1050 to 127.0.0.1:34413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:48:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741872_1055 (size=17994) 2024-11-24T08:48:58,263 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:58,680 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:48:59,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741877_1060 (size=6027) 2024-11-24T08:48:59,536 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
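[Editor's note] From here on, the regionserver log roller and the master store WAL roller retry the roll every couple of seconds and keep hitting the same "All datanodes ... are bad" IOException, after the earlier warning "Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas." HBase exposes the relevant WAL-rolling thresholds as configuration keys; the snippet below is a hedged sketch of setting them on a test configuration, with illustrative values that are assumptions and not what this test used.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollTuningSketch {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Consecutive WAL close/roll errors a regionserver tolerates before aborting.
        conf.setInt("hbase.regionserver.logroll.errors.tolerated", 2);
        // Request a WAL roll when the pipeline's live replica count drops below this.
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // Cap on consecutive low-replication-triggered rolls before the
        // "Too many consecutive RollWriter requests" warning seen above.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        return conf;
    }
}
```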
2024-11-24T08:49:00,263 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:00,680 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:01,537 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:02,264 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:02,681 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:03,537 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,097 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:49:04,265 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,681 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,767 ERROR [FSHLog-0-hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData-prefix:469387a2cdb6,39181,1732438114554 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,767 WARN [FSHLog-0-hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData-prefix:469387a2cdb6,39181,1732438114554 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,767 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C39181%2C1732438114554:(num 1732438114695) roll requested 2024-11-24T08:49:04,768 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C39181%2C1732438114554.1732438144767 2024-11-24T08:49:04,774 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:04,774 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:36334 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data4]'}, localName='127.0.0.1:33675', datanodeUuid='dfe95277-a083-409b-81fc-9930f31c832c', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741878_1061 to mirror 127.0.0.1:34413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:04,775 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33675,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:49:04,775 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741878_1061 2024-11-24T08:49:04,775 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:36334 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T08:49:04,775 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:36334 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:33675:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36334 dst: /127.0.0.1:33675 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:04,776 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:49:04,779 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33305 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:36350 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data4]'}, localName='127.0.0.1:33675', datanodeUuid='dfe95277-a083-409b-81fc-9930f31c832c', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741879_1062 to mirror 127.0.0.1:33305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:49:04,779 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33675,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK], DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK]) is bad. 2024-11-24T08:49:04,779 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741879_1062 2024-11-24T08:49:04,779 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:36350 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T08:49:04,780 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-328006441_22 at /127.0.0.1:36350 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33675:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36350 dst: /127.0.0.1:33675 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:04,780 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33305,DS-3ebdb449-65c3-43d9-93fb-c2ea9cadcee3,DISK] 2024-11-24T08:49:04,785 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:04,785 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:04,785 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:04,785 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:04,786 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:04,786 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438144767 2024-11-24T08:49:04,786 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,787 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:04,787 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 2024-11-24T08:49:04,787 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:45561:45561)] 2024-11-24T08:49:04,787 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 is not closed yet, will try archiving it next time 2024-11-24T08:49:04,787 WARN [IPC Server handler 1 on default port 36097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-11-24T08:49:04,788 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 after 0ms 2024-11-24T08:49:05,538 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:06,265 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:06,561 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6b69a580 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:37885,null,null]) java.net.ConnectException: Call From 469387a2cdb6/172.17.0.2 to localhost:42701 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T08:49:06,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741833_1019 (size=455) 2024-11-24T08:49:07,223 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438115008 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs/469387a2cdb6%2C40901%2C1732438114603.1732438115008 2024-11-24T08:49:07,227 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438132214 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs/469387a2cdb6%2C40901%2C1732438114603.1732438132214 2024-11-24T08:49:07,538 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
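[Note] The Close-WAL-Writer-0 entries above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 0ms") and the attempt=1 retry a few seconds further down show the WAL close path repeatedly asking the NameNode to recover the lease on the old WAL while "Lease recovery is in progress". For orientation only, the underlying HDFS client call is DistributedFileSystem.recoverLease; the standalone sketch below polls it until the file is closed. The path and the back-off interval are placeholders, not the values HBase actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder path; in the log above this is the old master-store WAL file.
    Path wal = new Path("hdfs://localhost:36097/path/to/old.wal");
    FileSystem fs = FileSystem.get(wal.toUri(), conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("Lease recovery only applies to HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // recoverLease() returns true once the file is closed; until then recovery
    // is still in progress and the caller has to poll, which is what the
    // "attempt=0", "attempt=1 ... after 4003ms" entries in the log reflect.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered) {
      Thread.sleep(4000L); // placeholder back-off, not HBase's retry schedule
      recovered = dfs.recoverLease(wal);
    }
    System.out.println("Lease recovered, file is closed: " + wal);
  }
}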
2024-11-24T08:49:07,546 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a48c848[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33675, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=45561, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741833_1019 to 127.0.0.1:34413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:08,266 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:08,791 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/WALs/469387a2cdb6,39181,1732438114554/469387a2cdb6%2C39181%2C1732438114554.1732438114695 after 4003ms 2024-11-24T08:49:09,539 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:10,267 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:10,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:49:10,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741836_1012 (size=76) 2024-11-24T08:49:11,540 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:11,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:49:11,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:49:11,903 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.1732438151903 2024-11-24T08:49:11,907 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:11,907 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:49:11,907 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741881_1065 2024-11-24T08:49:11,908 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:49:11,914 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:11,914 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:11,914 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:11,914 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:11,914 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:11,915 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438134243 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438151903 2024-11-24T08:49:11,915 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:45561:45561)] 2024-11-24T08:49:11,915 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.1732438134243 is not closed yet, will try archiving it next time 2024-11-24T08:49:11,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741862_1045 (size=13591) 2024-11-24T08:49:11,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40901 {}] regionserver.HRegion(8855): Flush requested on a20423784cc4152477ea73ae19a5d531 2024-11-24T08:49:11,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a20423784cc4152477ea73ae19a5d531 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:49:11,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/8e1d92ff87884f9bb08aec511715cb99 is 1080, key is row0013/info:/1732438151917/Put/seqid=0 2024-11-24T08:49:11,935 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:11,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52228 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741883_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10]'}, localName='127.0.0.1:32883', datanodeUuid='ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7', xmitsInProgress=0}:Exception transferring block BP-1677742332-172.17.0.2-1732438113484:blk_1073741883_1067 to mirror 127.0.0.1:34413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:11,935 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK], DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:49:11,935 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52228 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741883_1067] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:49:11,935 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741883_1067 2024-11-24T08:49:11,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1973926404_22 at /127.0.0.1:52228 [Receiving block BP-1677742332-172.17.0.2-1732438113484:blk_1073741883_1067] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52228 dst: /127.0.0.1:32883 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:11,936 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:49:11,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741884_1068 (size=11421) 2024-11-24T08:49:11,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741884_1068 (size=11421) 2024-11-24T08:49:11,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/8e1d92ff87884f9bb08aec511715cb99 2024-11-24T08:49:11,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/8e1d92ff87884f9bb08aec511715cb99 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/8e1d92ff87884f9bb08aec511715cb99 2024-11-24T08:49:11,954 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/8e1d92ff87884f9bb08aec511715cb99, entries=6, sequenceid=55, filesize=11.2 K 2024-11-24T08:49:11,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for a20423784cc4152477ea73ae19a5d531 in 28ms, sequenceid=55, compaction requested=true 2024-11-24T08:49:11,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:49:11,956 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-24T08:49:11,956 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:49:11,956 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 because midkey is the same as first or last row 2024-11-24T08:49:11,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a20423784cc4152477ea73ae19a5d531:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:49:11,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:49:11,956 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:49:11,957 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:49:11,957 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HStore(1541): a20423784cc4152477ea73ae19a5d531/info is initiating minor compaction (all files) 2024-11-24T08:49:11,958 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a20423784cc4152477ea73ae19a5d531/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:49:11,958 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/4e64305f84ba496e9b5ead31d0d5d5ae, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/8e1d92ff87884f9bb08aec511715cb99] into tmpdir=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp, totalSize=34.6 K 2024-11-24T08:49:11,958 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed17b39e368949b38eb1e9deabfa6554, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732438128203 2024-11-24T08:49:11,959 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4e64305f84ba496e9b5ead31d0d5d5ae, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732438135740 2024-11-24T08:49:11,959 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e1d92ff87884f9bb08aec511715cb99, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732438136149 2024-11-24T08:49:11,980 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a20423784cc4152477ea73ae19a5d531#info#compaction#24 average throughput is 8.72 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:49:11,981 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/6ad46da139934cc4b3fe767037f1a8ac is 1080, key is row0002/info:/1732438128203/Put/seqid=0 2024-11-24T08:49:11,983 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:11,983 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:49:11,983 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741885_1069 2024-11-24T08:49:11,984 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:49:11,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741886_1070 (size=23502) 2024-11-24T08:49:11,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741886_1070 (size=23502) 2024-11-24T08:49:11,997 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/6ad46da139934cc4b3fe767037f1a8ac as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/6ad46da139934cc4b3fe767037f1a8ac 2024-11-24T08:49:12,008 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a20423784cc4152477ea73ae19a5d531/info of a20423784cc4152477ea73ae19a5d531 into 6ad46da139934cc4b3fe767037f1a8ac(size=23.0 K), total size for store is 23.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:49:12,008 INFO [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531., storeName=a20423784cc4152477ea73ae19a5d531/info, priority=13, startTime=1732438151956; duration=0sec 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/6ad46da139934cc4b3fe767037f1a8ac because midkey is the same as first or last row 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/6ad46da139934cc4b3fe767037f1a8ac because midkey is the same as first or last row 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:49:12,008 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/6ad46da139934cc4b3fe767037f1a8ac because midkey is the same as first or last row 2024-11-24T08:49:12,009 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:49:12,009 DEBUG [RS:0;469387a2cdb6:40901-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a20423784cc4152477ea73ae19a5d531:info 2024-11-24T08:49:12,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40901 {}] regionserver.HRegion(8855): Flush requested on a20423784cc4152477ea73ae19a5d531 2024-11-24T08:49:12,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a20423784cc4152477ea73ae19a5d531 1/1 column families, 
dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:49:12,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/3725f755ddf04e1e870c29eefbddb84d is 1080, key is row0018/info:/1732438151928/Put/seqid=0 2024-11-24T08:49:12,158 WARN [Thread-1039 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,158 WARN [Thread-1039 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:32883,DS-d060f363-beec-473d-bc0c-6a9e8a10d4c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 
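The WARN blocks above show the HDFS write-pipeline recovery after the datanode at 127.0.0.1:34413 was killed: the connect fails, the block is abandoned, and the dead node is added to an exclusion list before the write is retried on the surviving datanodes. The following is a minimal exclude-and-retry sketch of that pattern using plain JDK sockets only; it is not the DataStreamer implementation, and the candidate list and connect timeout are assumptions for illustration.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;
    import java.util.ArrayList;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class ExcludeAndRetrySketch {
        /** Returns the nodes that accepted a TCP connection; unreachable ones go into 'excluded'. */
        static List<InetSocketAddress> probePipeline(List<InetSocketAddress> candidates,
                                                     Set<InetSocketAddress> excluded) {
            List<InetSocketAddress> usable = new ArrayList<>();
            for (InetSocketAddress node : candidates) {
                if (excluded.contains(node)) {
                    continue; // already known bad, do not retry it
                }
                try (Socket s = new Socket()) {
                    s.connect(node, 2000); // 2 second connect timeout
                    usable.add(node);
                } catch (IOException e) {
                    // Analogue of "Abandoning blk_... / Excluding datanode ..." in the log above.
                    System.out.println("excluding " + node + ": " + e.getMessage());
                    excluded.add(node);
                }
            }
            return usable;
        }

        public static void main(String[] args) {
            List<InetSocketAddress> candidates = List.of(
                new InetSocketAddress("127.0.0.1", 34413), // the datanode the log marks as bad
                new InetSocketAddress("127.0.0.1", 32883));
            Set<InetSocketAddress> excluded = new LinkedHashSet<>();
            System.out.println("usable pipeline: " + probePipeline(candidates, excluded));
        }
    }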
2024-11-24T08:49:12,159 WARN [Thread-1039 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741887_1071 2024-11-24T08:49:12,159 WARN [Thread-1039 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:49:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741888_1072 (size=11421) 2024-11-24T08:49:12,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741888_1072 (size=11421) 2024-11-24T08:49:12,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/3725f755ddf04e1e870c29eefbddb84d 2024-11-24T08:49:12,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/.tmp/info/3725f755ddf04e1e870c29eefbddb84d as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/3725f755ddf04e1e870c29eefbddb84d 2024-11-24T08:49:12,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/3725f755ddf04e1e870c29eefbddb84d, entries=6, sequenceid=66, filesize=11.2 K 2024-11-24T08:49:12,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a20423784cc4152477ea73ae19a5d531 in 37ms, sequenceid=66, compaction requested=false 2024-11-24T08:49:12,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a20423784cc4152477ea73ae19a5d531: 2024-11-24T08:49:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-24T08:49:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:49:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/6ad46da139934cc4b3fe767037f1a8ac because midkey is the same as first or last row 2024-11-24T08:49:12,267 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-24T08:49:12,267 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:49:12,349 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:49:12,350 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:49:12,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:12,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:12,351 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:49:12,351 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:49:12,351 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1996078098, stopped=false 2024-11-24T08:49:12,352 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=469387a2cdb6,39181,1732438114554 2024-11-24T08:49:12,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:12,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:12,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:12,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:12,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:12,355 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:49:12,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:12,356 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
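The call stack above shows where the shutdown originates: the JUnit @After method AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which closes the async connection and stops the RPC client. A minimal lifecycle sketch of that pattern follows; only shutdownMiniCluster() is taken from the stack trace, while the no-arg constructor, startMiniCluster(), and the test body are assumptions that may differ from the real test class.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil util = new HBaseTestingUtil(); // assumed constructor

        @Before
        public void setUp() throws Exception {
            util.startMiniCluster(); // assumed counterpart of the shutdown seen in the log
        }

        @Test
        public void testSomethingAgainstTheCluster() throws Exception {
            // test body elided
        }

        @After
        public void tearDown() throws Exception {
            util.shutdownMiniCluster(); // the call visible at AbstractTestLogRolling.tearDown
        }
    }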
2024-11-24T08:49:12,357 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:49:12,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:12,357 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:12,358 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:12,358 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,40901,1732438114603' ***** 2024-11-24T08:49:12,358 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:12,358 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:49:12,358 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,39681,1732438115472' ***** 2024-11-24T08:49:12,358 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:49:12,358 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:49:12,359 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:49:12,359 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:49:12,359 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:49:12,359 INFO [RS:0;469387a2cdb6:40901 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:49:12,359 INFO [RS:1;469387a2cdb6:39681 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:49:12,359 INFO [RS:0;469387a2cdb6:40901 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:49:12,359 INFO [RS:1;469387a2cdb6:39681 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:49:12,359 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,39681,1732438115472 2024-11-24T08:49:12,359 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(3091): Received CLOSE for a20423784cc4152477ea73ae19a5d531 2024-11-24T08:49:12,359 INFO [RS:1;469387a2cdb6:39681 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:49:12,359 INFO [RS:1;469387a2cdb6:39681 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;469387a2cdb6:39681. 
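The ZooKeeper events above are how the shutdown fans out: the master deletes /hbase/running, every ZKWatcher receives NodeDeleted for that path, and each server then begins stopping while re-setting a watch on the now-missing znode. Below is a small self-contained watcher sketch using the plain ZooKeeper client API against the quorum address from the log; the session timeout and the "initiate shutdown" reaction are placeholders, not HBase's ZKWatcher logic.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatcherSketch {
        public static void main(String[] args) throws Exception {
            String quorum = "127.0.0.1:58471";      // quorum string seen in the log
            String runningZNode = "/hbase/running"; // znode whose deletion signals shutdown

            ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
            Watcher watcher = new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    if (event.getType() == Event.EventType.NodeDeleted
                            && runningZNode.equals(event.getPath())) {
                        System.out.println("cluster no longer running, initiating shutdown");
                    } else {
                        try {
                            // ZooKeeper watches are one-shot, so re-register for other events.
                            zk.exists(runningZNode, this);
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                    }
                }
            };
            zk.exists(runningZNode, watcher); // sets the watch whether or not the node exists yet
            Thread.sleep(Long.MAX_VALUE);     // keep the demo process alive
        }
    }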
2024-11-24T08:49:12,359 DEBUG [RS:1;469387a2cdb6:39681 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:49:12,359 DEBUG [RS:1;469387a2cdb6:39681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:12,359 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,40901,1732438114603 2024-11-24T08:49:12,359 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,39681,1732438115472; all regions closed. 2024-11-24T08:49:12,360 INFO [RS:0;469387a2cdb6:40901 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:49:12,360 INFO [RS:0;469387a2cdb6:40901 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;469387a2cdb6:40901. 
2024-11-24T08:49:12,360 DEBUG [RS:0;469387a2cdb6:40901 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:49:12,360 DEBUG [RS:0;469387a2cdb6:40901 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:12,360 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,360 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:49:12,360 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:49:12,360 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,360 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
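Above, the stopping region server waits for its split and compaction threads to finish before closing regions. A common way to express that "stop accepting work, then wait for in-flight tasks" step is an ExecutorService shutdown with a bounded wait, sketched below; the pool size and grace period are illustrative, and this is not CompactSplit's own code.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class GracefulPoolShutdownSketch {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService compactionPool = Executors.newFixedThreadPool(2);
            compactionPool.submit(() -> System.out.println("pretend compaction running"));

            compactionPool.shutdown(); // stop accepting new compactions
            System.out.println("Waiting for Compaction Thread(s) to finish...");
            if (!compactionPool.awaitTermination(30, TimeUnit.SECONDS)) {
                // Give up on stragglers after the grace period, mirroring a forced stop.
                compactionPool.shutdownNow();
            }
            System.out.println("compaction pool stopped");
        }
    }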
2024-11-24T08:49:12,360 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,360 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:49:12,360 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,360 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,360 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T08:49:12,360 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1325): Online Regions={a20423784cc4152477ea73ae19a5d531=TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:49:12,360 DEBUG [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a20423784cc4152477ea73ae19a5d531 2024-11-24T08:49:12,360 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:49:12,360 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a20423784cc4152477ea73ae19a5d531, disabling compactions & flushes 2024-11-24T08:49:12,360 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:49:12,360 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:49:12,361 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:49:12,361 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:49:12,361 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:12,361 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:49:12,361 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:49:12,361 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. after waiting 0 ms 2024-11-24T08:49:12,361 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:49:12,361 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,361 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 2024-11-24T08:49:12,361 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-24T08:49:12,361 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/d404d00f1cdc4f0b826a87c139e26f86, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/db3a66456e68492b85854d5a7ec27d5e, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/4e64305f84ba496e9b5ead31d0d5d5ae, 
hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/8e1d92ff87884f9bb08aec511715cb99] to archive 2024-11-24T08:49:12,361 ERROR [FSHLog-0-hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77-prefix:469387a2cdb6,40901,1732438114603.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,361 WARN [FSHLog-0-hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77-prefix:469387a2cdb6,40901,1732438114603.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,361 WARN [IPC Server handler 4 on default port 36097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741837_1013 2024-11-24T08:49:12,361 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C40901%2C1732438114603.meta:.meta(num 1732438115380) roll requested 2024-11-24T08:49:12,362 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 after 0ms 2024-11-24T08:49:12,362 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40901%2C1732438114603.meta.1732438152362.meta 2024-11-24T08:49:12,362 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
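The StoreCloser entries around here move each compacted store file from the region's info/ directory to the mirrored path under archive/. The sketch below shows that move-to-archive step with the public Hadoop FileSystem API (mkdirs on the destination parent, then rename); the table, region, and file names are shortened placeholders, and this is not the HFileArchiver implementation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveCompactedFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:36097"); // namenode address from the log

            FileSystem fs = FileSystem.get(conf);

            // Hypothetical source/destination; the log moves files from
            // .../data/default/<table>/<region>/info/ to .../archive/data/default/<table>/<region>/info/.
            Path src = new Path("/data/default/ExampleTable/exampleRegion/info/exampleStoreFile");
            Path dst = new Path("/archive/data/default/ExampleTable/exampleRegion/info/exampleStoreFile");

            fs.mkdirs(dst.getParent());          // make sure the archive directory exists
            boolean moved = fs.rename(src, dst); // rename is a metadata-only move within HDFS
            System.out.println(moved ? "archived " + src : "failed to archive " + src);
            fs.close();
        }
    }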
2024-11-24T08:49:12,364 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/d404d00f1cdc4f0b826a87c139e26f86 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/d404d00f1cdc4f0b826a87c139e26f86 2024-11-24T08:49:12,364 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,364 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741889_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:33675,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 
2024-11-24T08:49:12,364 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741889_1074 2024-11-24T08:49:12,365 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:49:12,365 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/b3bb6db5bec949eebd2e12e1593340a0 2024-11-24T08:49:12,367 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/ed17b39e368949b38eb1e9deabfa6554 2024-11-24T08:49:12,368 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/db3a66456e68492b85854d5a7ec27d5e to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/db3a66456e68492b85854d5a7ec27d5e 2024-11-24T08:49:12,370 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/4e64305f84ba496e9b5ead31d0d5d5ae to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/4e64305f84ba496e9b5ead31d0d5d5ae 2024-11-24T08:49:12,371 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/8e1d92ff87884f9bb08aec511715cb99 to hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/info/8e1d92ff87884f9bb08aec511715cb99 2024-11-24T08:49:12,372 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=469387a2cdb6:39181 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T08:49:12,372 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d404d00f1cdc4f0b826a87c139e26f86=10347, b3bb6db5bec949eebd2e12e1593340a0=12506, ed17b39e368949b38eb1e9deabfa6554=17994, db3a66456e68492b85854d5a7ec27d5e=6027, 4e64305f84ba496e9b5ead31d0d5d5ae=6027, 8e1d92ff87884f9bb08aec511715cb99=11421] 2024-11-24T08:49:12,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,374 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,374 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,374 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,374 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438152362.meta 2024-11-24T08:49:12,377 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,377 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37885,DS-3eb6dc20-9e0a-4329-b72a-fdf7d6ec7850,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:12,377 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta 2024-11-24T08:49:12,377 WARN [IPC Server handler 1 on default port 36097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741834_1010 2024-11-24T08:49:12,377 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45561:45561),(127.0.0.1/127.0.0.1:37687:37687)] 2024-11-24T08:49:12,377 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta is not closed yet, will try archiving it next time 2024-11-24T08:49:12,378 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta after 0ms 2024-11-24T08:49:12,378 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a20423784cc4152477ea73ae19a5d531/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-24T08:49:12,379 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 2024-11-24T08:49:12,379 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a20423784cc4152477ea73ae19a5d531: Waiting for close lock at 1732438152360Running coprocessor pre-close hooks at 1732438152360Disabling compacts and flushes for region at 1732438152360Disabling writes for close at 1732438152361 (+1 ms)Writing region close event to WAL at 1732438152373 (+12 ms)Running coprocessor post-close hooks at 1732438152379 (+6 ms)Closed at 1732438152379 2024-11-24T08:49:12,379 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531. 
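The RecoverLeaseFSUtils messages above ("Recover lease on dfs file ... Failed to recover lease, attempt=0 ... after 0ms") are the standard procedure for taking over a WAL file whose writer died: ask the NameNode to recover the lease, then poll until the file is closed. DistributedFileSystem.recoverLease() is the HDFS client call involved; the retry count, pause, and WAL path in the sketch below are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:36097"); // namenode address from the log

            Path wal = new Path("/WALs/example-server/example-wal"); // hypothetical WAL path
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

            boolean recovered = false;
            for (int attempt = 0; attempt < 10 && !recovered; attempt++) {
                // recoverLease returns true once the file is closed and its last block finalized.
                recovered = dfs.recoverLease(wal);
                if (!recovered) {
                    System.out.println("Failed to recover lease, attempt=" + attempt);
                    Thread.sleep(1000); // back off before asking the NameNode again
                }
            }
            System.out.println(recovered ? "lease recovered for " + wal : "gave up on " + wal);
        }
    }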
2024-11-24T08:49:12,393 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/info/a2216c6b926a45cbabd5dfeb434e8b5d is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732438115566.a20423784cc4152477ea73ae19a5d531./info:regioninfo/1732438115939/Put/seqid=0 2024-11-24T08:49:12,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741891_1077 (size=7089) 2024-11-24T08:49:12,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741891_1077 (size=7089) 2024-11-24T08:49:12,399 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/info/a2216c6b926a45cbabd5dfeb434e8b5d 2024-11-24T08:49:12,419 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/ns/7e1e419d7b934ae882a29572d6c5f753 is 43, key is default/ns:d/1732438115427/Put/seqid=0 2024-11-24T08:49:12,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741892_1078 (size=5153) 2024-11-24T08:49:12,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741892_1078 (size=5153) 2024-11-24T08:49:12,425 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/ns/7e1e419d7b934ae882a29572d6c5f753 2024-11-24T08:49:12,446 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/table/1bf20ad178064ad7a8121d4911fd5d35 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732438115953/Put/seqid=0 2024-11-24T08:49:12,448 WARN [Thread-1064 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:12,448 WARN [Thread-1064 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677742332-172.17.0.2-1732438113484:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK], DatanodeInfoWithStorage[127.0.0.1:33675,DS-2a03935c-c1ea-4bc3-9dd6-bb2f068f12fe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK]) is bad. 2024-11-24T08:49:12,448 WARN [Thread-1064 {}] hdfs.DataStreamer(1850): Abandoning BP-1677742332-172.17.0.2-1732438113484:blk_1073741893_1079 2024-11-24T08:49:12,449 WARN [Thread-1064 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34413,DS-eb4ea890-d79e-435c-9739-4e0565995c05,DISK] 2024-11-24T08:49:12,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741894_1080 (size=5424) 2024-11-24T08:49:12,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741894_1080 (size=5424) 2024-11-24T08:49:12,454 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/table/1bf20ad178064ad7a8121d4911fd5d35 2024-11-24T08:49:12,461 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/info/a2216c6b926a45cbabd5dfeb434e8b5d as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/info/a2216c6b926a45cbabd5dfeb434e8b5d 2024-11-24T08:49:12,467 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/info/a2216c6b926a45cbabd5dfeb434e8b5d, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T08:49:12,468 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/ns/7e1e419d7b934ae882a29572d6c5f753 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/ns/7e1e419d7b934ae882a29572d6c5f753 2024-11-24T08:49:12,474 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/ns/7e1e419d7b934ae882a29572d6c5f753, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T08:49:12,476 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/.tmp/table/1bf20ad178064ad7a8121d4911fd5d35 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/table/1bf20ad178064ad7a8121d4911fd5d35 2024-11-24T08:49:12,482 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/table/1bf20ad178064ad7a8121d4911fd5d35, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T08:49:12,484 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false 2024-11-24T08:49:12,489 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T08:49:12,490 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:49:12,490 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:49:12,490 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438152360Running coprocessor pre-close hooks at 1732438152360Disabling compacts and flushes for region at 1732438152360Disabling writes for close at 1732438152361 (+1 ms)Obtaining lock to block concurrent updates at 1732438152361Preparing flush snapshotting stores in 1588230740 at 1732438152361Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732438152361Flushing stores of hbase:meta,,1.1588230740 at 1732438152378 (+17 ms)Flushing 1588230740/info: creating writer at 1732438152378Flushing 1588230740/info: appending metadata at 1732438152392 (+14 ms)Flushing 1588230740/info: closing flushed file at 1732438152392Flushing 1588230740/ns: creating writer at 1732438152405 (+13 ms)Flushing 1588230740/ns: appending metadata at 1732438152419 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732438152419Flushing 1588230740/table: creating writer at 1732438152432 (+13 ms)Flushing 1588230740/table: appending metadata at 1732438152446 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732438152446Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74873ba3: reopening flushed file at 1732438152459 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31cc11d: reopening flushed file at 1732438152467 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d238cb5: reopening flushed file at 1732438152475 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false at 1732438152484 (+9 ms)Writing region close event to WAL at 
1732438152485 (+1 ms)Running coprocessor post-close hooks at 1732438152489 (+4 ms)Closed at 1732438152490 (+1 ms) 2024-11-24T08:49:12,490 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:49:12,560 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,40901,1732438114603; all regions closed. 2024-11-24T08:49:12,561 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,561 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,562 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,562 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,562 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:12,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741890_1075 (size=825) 2024-11-24T08:49:12,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741890_1075 (size=825) 2024-11-24T08:49:12,596 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:49:12,597 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:49:12,868 INFO [regionserver/469387a2cdb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:49:12,873 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:49:12,873 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:49:13,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741862_1045 (size=13591) 2024-11-24T08:49:13,536 INFO [regionserver/469387a2cdb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:49:13,549 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a48c848[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33675, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=45561, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741827_1003 to 127.0.0.1:34413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:49:13,549 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2a6810ca[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33675, datanodeUuid=dfe95277-a083-409b-81fc-9930f31c832c, infoPort=45561, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=132196811;c=1732438113484):Failed to transfer BP-1677742332-172.17.0.2-1732438113484:blk_1073741829_1005 to 127.0.0.1:34413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:14,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:49:15,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T08:49:15,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:49:15,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:49:15,573 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T08:49:15,573 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
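The pair of "Failed to transfer ... got java.net.ConnectException: Connection refused" warnings above come from the surviving datanode (127.0.0.1:33675) still trying to push replica copies to the datanode the test killed (127.0.0.1:34413); such attempts keep failing until the NameNode marks that node dead. As an illustration only (this is not part of the test itself), the NameNode's live/dead view can be inspected from a client via DistributedFileSystem.getDataNodeStats; the filesystem URI below is hypothetical, loosely matching the minicluster address seen in this log.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class DatanodeReportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address; the minicluster in this log ran at localhost:36097.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36097"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Datanodes the NameNode currently considers alive.
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
        System.out.println("LIVE " + dn.getXferAddr());
      }
      // Datanodes it has declared dead (the killed one eventually lands here).
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.DEAD)) {
        System.out.println("DEAD " + dn.getXferAddr());
      }
    }
  }
}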
2024-11-24T08:49:16,364 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 after 4003ms 2024-11-24T08:49:16,379 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta after 4002ms 2024-11-24T08:49:16,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:49:16,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741835_1011 (size=393) 2024-11-24T08:49:16,566 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@55ee9c39 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:37885,null,null]) java.net.ConnectException: Call From 469387a2cdb6/172.17.0.2 to localhost:42701 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T08:49:17,361 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T08:49:17,367 DEBUG [RS:1;469387a2cdb6:39681 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs 2024-11-24T08:49:17,367 INFO [RS:1;469387a2cdb6:39681 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C39681%2C1732438115472:(num 1732438115667) 2024-11-24T08:49:17,367 DEBUG [RS:1;469387a2cdb6:39681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:17,367 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:49:17,368 INFO [RS:1;469387a2cdb6:39681 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:49:17,368 INFO [RS:1;469387a2cdb6:39681 {}] hbase.ChoreService(370): Chore service for: regionserver/469387a2cdb6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:49:17,369 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:49:17,369 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:49:17,369 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:49:17,369 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
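Two recovery mechanisms are visible in the entries just above. First, the Close-WAL-Writer threads report "Failed to recover lease, attempt=1 ... after 4003ms" while they retry HDFS lease recovery on the WAL files of the stopped servers. Second, wal.AbstractFSWAL warns that the async writer close exceeded the 5 seconds it was willing to wait and points at the configuration key "hbase.wal.fshlog.wait.on.shutdown.seconds" for raising that limit. As a rough sketch only, and not the project's actual RecoverLeaseFSUtils implementation, driving lease recovery from an HDFS client looks roughly like the following; the deadline and sleep interval are hypothetical, chosen to echo the ~4 s cadence in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Repeatedly ask the NameNode to release the lease on a WAL file whose writer died. */
  static boolean recoverLease(Configuration conf, Path walFile, long timeoutMs) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      attempt++;
      if (dfs.recoverLease(walFile)) { // true once the lease has been released
        return true;
      }
      System.out.printf("Failed to recover lease, attempt=%d on file=%s%n", attempt, walFile);
      Thread.sleep(4000L); // hypothetical back-off between attempts
    }
    return false;
  }
}

In the run above the first attempt had not yet succeeded roughly four seconds in, which is exactly what the two attempt=1 lines record.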
2024-11-24T08:49:17,369 INFO [RS:1;469387a2cdb6:39681 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:49:17,370 INFO [RS:1;469387a2cdb6:39681 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39681 2024-11-24T08:49:17,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:49:17,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,39681,1732438115472 2024-11-24T08:49:17,373 INFO [RS:1;469387a2cdb6:39681 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:49:17,374 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,39681,1732438115472] 2024-11-24T08:49:17,375 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,39681,1732438115472 already deleted, retry=false 2024-11-24T08:49:17,375 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,39681,1732438115472 expired; onlineServers=1 2024-11-24T08:49:17,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:17,475 INFO [RS:1;469387a2cdb6:39681 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:49:17,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39681-0x10070eb1ba40002, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:17,475 INFO [RS:1;469387a2cdb6:39681 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,39681,1732438115472; zookeeper connection closed. 2024-11-24T08:49:17,475 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69ead637 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69ead637 2024-11-24T08:49:17,562 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T08:49:17,568 DEBUG [RS:0;469387a2cdb6:40901 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs 2024-11-24T08:49:17,569 INFO [RS:0;469387a2cdb6:40901 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C40901%2C1732438114603.meta:.meta(num 1732438152362) 2024-11-24T08:49:17,569 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,570 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,570 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,570 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,570 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741882_1066 (size=16308) 2024-11-24T08:49:17,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741882_1066 (size=16308) 2024-11-24T08:49:17,577 DEBUG [RS:0;469387a2cdb6:40901 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/oldWALs 2024-11-24T08:49:17,577 INFO [RS:0;469387a2cdb6:40901 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C40901%2C1732438114603:(num 1732438151903) 2024-11-24T08:49:17,577 DEBUG [RS:0;469387a2cdb6:40901 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:17,577 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:49:17,577 INFO [RS:0;469387a2cdb6:40901 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:49:17,577 INFO [RS:0;469387a2cdb6:40901 {}] hbase.ChoreService(370): Chore service for: regionserver/469387a2cdb6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:49:17,577 INFO [RS:0;469387a2cdb6:40901 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:49:17,577 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:49:17,577 INFO [RS:0;469387a2cdb6:40901 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40901 2024-11-24T08:49:17,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,40901,1732438114603 2024-11-24T08:49:17,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:49:17,579 INFO [RS:0;469387a2cdb6:40901 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:49:17,579 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,40901,1732438114603] 2024-11-24T08:49:17,580 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,40901,1732438114603 already deleted, retry=false 2024-11-24T08:49:17,580 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,40901,1732438114603 expired; onlineServers=0 2024-11-24T08:49:17,580 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '469387a2cdb6,39181,1732438114554' ***** 2024-11-24T08:49:17,580 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:49:17,580 INFO [M:0;469387a2cdb6:39181 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:49:17,580 INFO [M:0;469387a2cdb6:39181 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:49:17,580 DEBUG [M:0;469387a2cdb6:39181 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:49:17,581 DEBUG [M:0;469387a2cdb6:39181 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:49:17,581 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:49:17,581 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438114767 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438114767,5,FailOnTimeoutGroup] 2024-11-24T08:49:17,581 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438114767 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438114767,5,FailOnTimeoutGroup] 2024-11-24T08:49:17,581 INFO [M:0;469387a2cdb6:39181 {}] hbase.ChoreService(370): Chore service for: master/469387a2cdb6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:49:17,581 INFO [M:0;469387a2cdb6:39181 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:49:17,581 DEBUG [M:0;469387a2cdb6:39181 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:49:17,581 INFO [M:0;469387a2cdb6:39181 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:49:17,581 INFO [M:0;469387a2cdb6:39181 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:49:17,581 INFO [M:0;469387a2cdb6:39181 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:49:17,581 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:49:17,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:49:17,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:17,582 DEBUG [M:0;469387a2cdb6:39181 {}] zookeeper.ZKUtil(347): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:49:17,582 WARN [M:0;469387a2cdb6:39181 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:49:17,582 INFO [M:0;469387a2cdb6:39181 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/.lastflushedseqids 2024-11-24T08:49:17,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741895_1081 (size=130) 2024-11-24T08:49:17,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741895_1081 (size=130) 2024-11-24T08:49:17,588 INFO [M:0;469387a2cdb6:39181 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:49:17,588 INFO [M:0;469387a2cdb6:39181 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:49:17,588 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:49:17,588 INFO [M:0;469387a2cdb6:39181 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:17,588 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:17,588 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:49:17,588 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:17,588 INFO [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-24T08:49:17,604 DEBUG [M:0;469387a2cdb6:39181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01bb0d5941de489eafff2fb7ac2b6f00 is 82, key is hbase:meta,,1/info:regioninfo/1732438115412/Put/seqid=0 2024-11-24T08:49:17,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741896_1082 (size=5672) 2024-11-24T08:49:17,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741896_1082 (size=5672) 2024-11-24T08:49:17,610 INFO [M:0;469387a2cdb6:39181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01bb0d5941de489eafff2fb7ac2b6f00 2024-11-24T08:49:17,630 DEBUG [M:0;469387a2cdb6:39181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c93e74aa6d9d41058e2fb6e3df648ab7 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732438115961/Put/seqid=0 2024-11-24T08:49:17,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741897_1083 (size=6255) 2024-11-24T08:49:17,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741897_1083 (size=6255) 2024-11-24T08:49:17,635 INFO [M:0;469387a2cdb6:39181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c93e74aa6d9d41058e2fb6e3df648ab7 2024-11-24T08:49:17,640 INFO [M:0;469387a2cdb6:39181 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c93e74aa6d9d41058e2fb6e3df648ab7 2024-11-24T08:49:17,657 DEBUG [M:0;469387a2cdb6:39181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7d1fcfd78a244d0834307754495f7d7 is 69, key is 469387a2cdb6,39681,1732438115472/rs:state/1732438115518/Put/seqid=0 2024-11-24T08:49:17,663 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741898_1084 (size=5224) 2024-11-24T08:49:17,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741898_1084 (size=5224) 2024-11-24T08:49:17,663 INFO [M:0;469387a2cdb6:39181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7d1fcfd78a244d0834307754495f7d7 2024-11-24T08:49:17,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:17,680 INFO [RS:0;469387a2cdb6:40901 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:49:17,680 INFO [RS:0;469387a2cdb6:40901 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,40901,1732438114603; zookeeper connection closed. 2024-11-24T08:49:17,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40901-0x10070eb1ba40001, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:17,680 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@175ebc4b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@175ebc4b 2024-11-24T08:49:17,680 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-24T08:49:17,688 DEBUG [M:0;469387a2cdb6:39181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4de181083f5f43a693ec432eee5f82e8 is 52, key is load_balancer_on/state:d/1732438115456/Put/seqid=0 2024-11-24T08:49:17,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741899_1085 (size=5056) 2024-11-24T08:49:17,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741899_1085 (size=5056) 2024-11-24T08:49:17,694 INFO [M:0;469387a2cdb6:39181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4de181083f5f43a693ec432eee5f82e8 2024-11-24T08:49:17,699 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01bb0d5941de489eafff2fb7ac2b6f00 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/01bb0d5941de489eafff2fb7ac2b6f00 2024-11-24T08:49:17,704 INFO [M:0;469387a2cdb6:39181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/01bb0d5941de489eafff2fb7ac2b6f00, entries=8, sequenceid=60, filesize=5.5 K 2024-11-24T08:49:17,705 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c93e74aa6d9d41058e2fb6e3df648ab7 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c93e74aa6d9d41058e2fb6e3df648ab7 2024-11-24T08:49:17,711 INFO [M:0;469387a2cdb6:39181 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c93e74aa6d9d41058e2fb6e3df648ab7 2024-11-24T08:49:17,711 INFO [M:0;469387a2cdb6:39181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c93e74aa6d9d41058e2fb6e3df648ab7, entries=6, sequenceid=60, filesize=6.1 K 2024-11-24T08:49:17,712 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7d1fcfd78a244d0834307754495f7d7 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7d1fcfd78a244d0834307754495f7d7 2024-11-24T08:49:17,717 INFO [M:0;469387a2cdb6:39181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7d1fcfd78a244d0834307754495f7d7, entries=2, sequenceid=60, filesize=5.1 K 2024-11-24T08:49:17,718 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4de181083f5f43a693ec432eee5f82e8 as hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4de181083f5f43a693ec432eee5f82e8 2024-11-24T08:49:17,723 INFO [M:0;469387a2cdb6:39181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4de181083f5f43a693ec432eee5f82e8, entries=1, sequenceid=60, filesize=4.9 K 2024-11-24T08:49:17,725 INFO [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=60, compaction requested=false 2024-11-24T08:49:17,726 INFO [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
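The master-store flush above follows the same two-step commit as the earlier meta flush: each column family's snapshot is written to a file under .tmp (the DefaultStoreFlusher "Flushed memstore data size=..." lines), then HRegionFileSystem logs "Committing ... as ..." while the file is moved into the family directory, and HStore finally reports it as "Added". A stripped-down sketch of that write-then-rename pattern with the Hadoop FileSystem API is shown below; the paths and payload are hypothetical and this is not HBase's actual flush code.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/data/table/region/.tmp/flushfile");    // hypothetical layout
    Path dst = new Path("/data/table/region/family/flushfile");

    // 1. Write the flushed data to a temporary location first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("flushed cells".getBytes(StandardCharsets.UTF_8));
    }

    // 2. "Commit" by renaming into the store directory; readers only ever see
    //    complete files, which is why the log interleaves Committing/Added pairs.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed for " + tmp);
    }
  }
}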
2024-11-24T08:49:17,726 DEBUG [M:0;469387a2cdb6:39181 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438157588Disabling compacts and flushes for region at 1732438157588Disabling writes for close at 1732438157588Obtaining lock to block concurrent updates at 1732438157588Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732438157588Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732438157589 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732438157589Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732438157589Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732438157603 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732438157603Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732438157615 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732438157629 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732438157629Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732438157640 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732438157657 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732438157657Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732438157669 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732438157688 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732438157688Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e4fcf9f: reopening flushed file at 1732438157698 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e448c6f: reopening flushed file at 1732438157704 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71fab533: reopening flushed file at 1732438157711 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b9fc6d4: reopening flushed file at 1732438157717 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=60, compaction requested=false at 1732438157725 (+8 ms)Writing region close event to WAL at 1732438157726 (+1 ms)Closed at 1732438157726 2024-11-24T08:49:17,727 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,727 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,727 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,727 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,727 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:17,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33675 is added to blk_1073741880_1063 (size=1045) 2024-11-24T08:49:17,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741880_1063 (size=1045) 2024-11-24T08:49:17,730 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:49:17,730 INFO [M:0;469387a2cdb6:39181 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T08:49:17,730 INFO [M:0;469387a2cdb6:39181 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39181 2024-11-24T08:49:17,731 INFO [M:0;469387a2cdb6:39181 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:49:17,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:17,832 INFO [M:0;469387a2cdb6:39181 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:49:17,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39181-0x10070eb1ba40000, quorum=127.0.0.1:58471, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:17,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a60e80d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:17,837 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a864bf5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:17,838 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:17,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55dafd43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:17,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b392de9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:17,839 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:17,839 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677742332-172.17.0.2-1732438113484 (Datanode Uuid dfe95277-a083-409b-81fc-9930f31c832c) service to localhost/127.0.0.1:36097 2024-11-24T08:49:17,839 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:49:17,839 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:17,839 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36fe8747 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:37885,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:42701 , LocalHost:localPort 469387a2cdb6/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T08:49:17,839 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36fe8747 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:33675,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1677742332-172.17.0.2-1732438113484 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:17,840 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data3/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:17,840 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36fe8747 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37885,null,null], DatanodeInfoWithStorage[127.0.0.1:33675,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:37885,null,null], DatanodeInfoWithStorage[127.0.0.1:33675,null,null]] 2024-11-24T08:49:17,840 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36fe8747 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37885,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1677742332-172.17.0.2-1732438113484 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:17,840 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36fe8747 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33675,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1677742332-172.17.0.2-1732438113484 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:17,840 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data4/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:17,840 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36fe8747 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37885,null,null], DatanodeInfoWithStorage[127.0.0.1:33675,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1677742332-172.17.0.2-1732438113484:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:37885,null,null], DatanodeInfoWithStorage[127.0.0.1:33675,null,null]] 2024-11-24T08:49:17,840 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:17,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26f4ea5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:17,845 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61ab06b3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:17,845 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:17,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70095cb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:17,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@6d11ae98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:17,846 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:17,846 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:49:17,846 WARN [BP-1677742332-172.17.0.2-1732438113484 heartbeating to localhost/127.0.0.1:36097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677742332-172.17.0.2-1732438113484 (Datanode Uuid ef0fea69-0c34-4c33-aa8d-5dddb5f2e3d7) service to localhost/127.0.0.1:36097 2024-11-24T08:49:17,846 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:17,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data9/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:17,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/cluster_b2dbaa4d-5c2d-16a2-7ab4-92548a059198/data/data10/current/BP-1677742332-172.17.0.2-1732438113484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:17,847 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:17,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@410b0043{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:49:17,852 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63fdee04{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:17,852 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:17,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9fcb45e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:17,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29ce44bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:17,859 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:49:17,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:49:17,896 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath 
Thread=152 (was 78) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:36097 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:36097 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:36097 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:36097 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35981 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:35981 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:36097 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f98d4bf5b70.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f98d4bf5b70.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36097 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:36097 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=107 (was 131), ProcessCount=11 (was 11), AvailableMemoryMB=1689 (was 2261) 2024-11-24T08:49:17,903 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=152, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=107, ProcessCount=11, AvailableMemoryMB=1689 2024-11-24T08:49:17,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:49:17,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.log.dir so I do NOT create it in target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb 2024-11-24T08:49:17,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c38d7466-fb98-a692-d97a-9fbdd8f81137/hadoop.tmp.dir so I do NOT create it in target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb 2024-11-24T08:49:17,903 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436, deleteOnExit=true 2024-11-24T08:49:17,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:49:17,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/test.cache.data in system properties and HBase conf 2024-11-24T08:49:17,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:49:17,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:49:17,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:49:17,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:49:17,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:49:17,904 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:49:17,905 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase 
conf 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:49:17,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:49:17,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:49:17,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:49:17,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:49:17,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:49:17,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:49:17,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:49:17,922 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:49:17,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:17,983 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:17,987 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:17,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:17,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:17,988 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:49:17,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:17,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4284e6e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:17,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@500b485f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:18,083 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@295c5a84{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir/jetty-localhost-33221-hadoop-hdfs-3_4_1-tests_jar-_-any-13825727713002607401/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:49:18,084 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42450b2b{HTTP/1.1, (http/1.1)}{localhost:33221} 2024-11-24T08:49:18,084 INFO [Time-limited test {}] server.Server(415): Started @148701ms 2024-11-24T08:49:18,100 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:49:18,144 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:18,146 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:18,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:18,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:18,149 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:49:18,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@559a021a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:18,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3409b3ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:18,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bc967ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir/jetty-localhost-45479-hadoop-hdfs-3_4_1-tests_jar-_-any-6689344901439645595/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:18,244 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16962f7c{HTTP/1.1, (http/1.1)}{localhost:45479} 2024-11-24T08:49:18,245 INFO [Time-limited test {}] server.Server(415): Started @148862ms 2024-11-24T08:49:18,246 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:49:18,271 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:18,275 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:18,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:18,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:18,276 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:49:18,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e024519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:18,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0a81cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:18,305 WARN [Thread-1183 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data1/current/BP-590959869-172.17.0.2-1732438157948/current, will proceed with Du for space computation calculation, 2024-11-24T08:49:18,305 WARN [Thread-1184 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data2/current/BP-590959869-172.17.0.2-1732438157948/current, will proceed with Du for space computation calculation, 2024-11-24T08:49:18,323 WARN [Thread-1162 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:49:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf0a7ed50a5832942 with lease ID 0x8f522ae550721694: Processing first storage report for DS-bce299a1-4174-4a22-b417-838e45f8cdbb from datanode DatanodeRegistration(127.0.0.1:36127, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=42889, infoSecurePort=0, ipcPort=38191, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948) 2024-11-24T08:49:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0a7ed50a5832942 with lease ID 0x8f522ae550721694: from storage DS-bce299a1-4174-4a22-b417-838e45f8cdbb node DatanodeRegistration(127.0.0.1:36127, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=42889, infoSecurePort=0, ipcPort=38191, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf0a7ed50a5832942 with lease ID 0x8f522ae550721694: Processing first storage report for DS-a1487033-4466-419b-9411-eb283096a660 from datanode DatanodeRegistration(127.0.0.1:36127, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=42889, infoSecurePort=0, ipcPort=38191, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948) 2024-11-24T08:49:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0a7ed50a5832942 with lease ID 0x8f522ae550721694: from storage DS-a1487033-4466-419b-9411-eb283096a660 node DatanodeRegistration(127.0.0.1:36127, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=42889, infoSecurePort=0, ipcPort=38191, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:49:18,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:18,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@182fe99a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir/jetty-localhost-38845-hadoop-hdfs-3_4_1-tests_jar-_-any-9995318415773937026/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:18,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T08:49:18,381 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3df8f3f0{HTTP/1.1, (http/1.1)}{localhost:38845}
2024-11-24T08:49:18,381 INFO [Time-limited test {}] server.Server(415): Started @148998ms
2024-11-24T08:49:18,382 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T08:49:18,438 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data3/current/BP-590959869-172.17.0.2-1732438157948/current, will proceed with Du for space computation calculation,
2024-11-24T08:49:18,438 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data4/current/BP-590959869-172.17.0.2-1732438157948/current, will proceed with Du for space computation calculation,
2024-11-24T08:49:18,454 WARN [Thread-1198 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-24T08:49:18,456 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35e46b5a1a3a10b9 with lease ID 0x8f522ae550721695: Processing first storage report for DS-3567303f-bbfc-400b-9af1-2c28f5ff7090 from datanode DatanodeRegistration(127.0.0.1:46299, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=42055, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948)
2024-11-24T08:49:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35e46b5a1a3a10b9 with lease ID 0x8f522ae550721695: from storage DS-3567303f-bbfc-400b-9af1-2c28f5ff7090 node DatanodeRegistration(127.0.0.1:46299, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=42055, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T08:49:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35e46b5a1a3a10b9 with lease ID 0x8f522ae550721695: Processing first storage report for DS-7f14f266-0fe7-4a57-8405-e7ed6b9a0f23 from datanode DatanodeRegistration(127.0.0.1:46299, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=42055, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948)
2024-11-24T08:49:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35e46b5a1a3a10b9 with lease ID 0x8f522ae550721695: from storage DS-7f14f266-0fe7-4a57-8405-e7ed6b9a0f23 node DatanodeRegistration(127.0.0.1:46299, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=42055, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T08:49:18,509 DEBUG [Time-limited test {}]
hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb 2024-11-24T08:49:18,512 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/zookeeper_0, clientPort=52517, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:49:18,513 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52517 2024-11-24T08:49:18,513 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:18,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:18,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:49:18,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:49:18,528 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55 with version=8 2024-11-24T08:49:18,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase-staging 2024-11-24T08:49:18,531 INFO [Time-limited test {}] client.ConnectionUtils(128): master/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:49:18,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:18,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:18,531 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:49:18,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:18,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:49:18,531 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:49:18,531 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:49:18,532 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46577 2024-11-24T08:49:18,534 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46577 connecting to ZooKeeper ensemble=127.0.0.1:52517 2024-11-24T08:49:18,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465770x0, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:49:18,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46577-0x10070ebc91e0000 connected 2024-11-24T08:49:18,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:18,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:18,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:18,555 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55, hbase.cluster.distributed=false 2024-11-24T08:49:18,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:49:18,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46577 2024-11-24T08:49:18,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46577 2024-11-24T08:49:18,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46577 2024-11-24T08:49:18,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46577 2024-11-24T08:49:18,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46577 2024-11-24T08:49:18,575 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:49:18,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:18,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:18,576 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:49:18,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:18,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:49:18,576 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:49:18,576 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:49:18,577 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45991 2024-11-24T08:49:18,578 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45991 connecting to ZooKeeper ensemble=127.0.0.1:52517 2024-11-24T08:49:18,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:18,580 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:18,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459910x0, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:49:18,584 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459910x0, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:18,584 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45991-0x10070ebc91e0001 connected 2024-11-24T08:49:18,584 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:49:18,585 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:49:18,585 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:49:18,586 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:49:18,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45991 2024-11-24T08:49:18,587 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45991 2024-11-24T08:49:18,589 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45991 2024-11-24T08:49:18,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45991 2024-11-24T08:49:18,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45991 2024-11-24T08:49:18,601 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;469387a2cdb6:46577 2024-11-24T08:49:18,602 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/469387a2cdb6,46577,1732438158530 2024-11-24T08:49:18,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:18,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:18,603 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/469387a2cdb6,46577,1732438158530 2024-11-24T08:49:18,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:49:18,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,605 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:49:18,605 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/469387a2cdb6,46577,1732438158530 from backup master directory 2024-11-24T08:49:18,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/469387a2cdb6,46577,1732438158530 2024-11-24T08:49:18,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:18,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:18,606 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:49:18,606 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=469387a2cdb6,46577,1732438158530 2024-11-24T08:49:18,610 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/hbase.id] with ID: 40c5adcc-3023-410f-9ce9-cd57d84aeab1 2024-11-24T08:49:18,610 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/.tmp/hbase.id 2024-11-24T08:49:18,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:49:18,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:49:18,618 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/.tmp/hbase.id]:[hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/hbase.id] 2024-11-24T08:49:18,630 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:18,630 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:49:18,631 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
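[Editor's note] The FSUtils entries just above describe the cluster ID being created, written to .tmp/hbase.id, and then moved to hbase.id. Below is a minimal, hedged sketch of that write-to-a-temporary-file-then-rename pattern on the Hadoop FileSystem API; the class name, root directory, and error handling are illustrative assumptions, not the actual FSUtils code.

```java
// Sketch only: write-then-rename so readers never observe a partially written ID file.
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void writeIdFile(Configuration conf, String clusterId) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // placeholder root dir
    Path tmpFile = new Path(rootDir, ".tmp/hbase.id");               // temporary location
    Path idFile  = new Path(rootDir, "hbase.id");                    // final location

    // 1. Write the ID to the temporary file first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // 2. Rename into place; the rename is the "commit" step.
    if (!fs.rename(tmpFile, idFile)) {
      throw new IOException("Failed to rename " + tmpFile + " to " + idFile);
    }
  }
}
```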
2024-11-24T08:49:18,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:49:18,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:49:18,640 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:49:18,641 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:49:18,641 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:49:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:49:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:49:18,649 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store 2024-11-24T08:49:18,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:49:18,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:49:18,656 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:18,656 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:49:18,656 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:18,656 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:18,656 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:49:18,656 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:18,656 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
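[Editor's note] The master:store descriptor logged above lists its column families (info, proc, rs, state) with VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE attributes. For reference, a hedged sketch of how the same attributes for the 'info' family look when expressed through HBase's public builder API; this is illustrative only and not MasterRegion's internal code.

```java
// Sketch: the 'info' family attributes from the master:store descriptor, via the public API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}
```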
2024-11-24T08:49:18,656 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438158656Disabling compacts and flushes for region at 1732438158656Disabling writes for close at 1732438158656Writing region close event to WAL at 1732438158656Closed at 1732438158656 2024-11-24T08:49:18,657 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/.initializing 2024-11-24T08:49:18,657 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530 2024-11-24T08:49:18,659 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C46577%2C1732438158530, suffix=, logDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530, archiveDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/oldWALs, maxLogs=10 2024-11-24T08:49:18,660 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C46577%2C1732438158530.1732438158660 2024-11-24T08:49:18,664 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 2024-11-24T08:49:18,669 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42889:42889),(127.0.0.1/127.0.0.1:42055:42055)] 2024-11-24T08:49:18,670 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:49:18,670 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:18,670 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,670 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,671 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:49:18,673 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:18,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:49:18,675 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:18,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:49:18,677 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:18,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:49:18,679 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:18,679 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,680 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,680 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,681 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,681 DEBUG [master/469387a2cdb6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,682 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:49:18,683 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:18,685 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:49:18,685 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692982, jitterRate=-0.11882823705673218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:49:18,686 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732438158670Initializing all the Stores at 1732438158671 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438158671Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438158671Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438158671Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438158671Cleaning up temporary data from old regions at 1732438158681 (+10 ms)Region opened successfully at 1732438158686 (+5 ms) 2024-11-24T08:49:18,688 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:49:18,692 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11d92f9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:49:18,693 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:49:18,693 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:49:18,693 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:49:18,693 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:49:18,694 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:49:18,694 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:49:18,694 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:49:18,696 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:49:18,697 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:49:18,697 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:49:18,698 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:49:18,698 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:49:18,699 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:49:18,699 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:49:18,700 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:49:18,701 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:49:18,702 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:49:18,703 DEBUG 
[master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:49:18,705 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:49:18,706 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:49:18,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:18,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:18,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,708 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=469387a2cdb6,46577,1732438158530, sessionid=0x10070ebc91e0000, setting cluster-up flag (Was=false) 2024-11-24T08:49:18,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,711 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:49:18,712 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,46577,1732438158530 2024-11-24T08:49:18,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:18,717 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:49:18,718 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,46577,1732438158530 2024-11-24T08:49:18,719 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:49:18,720 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:18,720 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:49:18,721 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:49:18,721 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 469387a2cdb6,46577,1732438158530 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/469387a2cdb6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:49:18,722 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:49:18,723 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732438188723 2024-11-24T08:49:18,723 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:49:18,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:49:18,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:49:18,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:49:18,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:49:18,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:49:18,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,724 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:18,724 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:49:18,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:49:18,725 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:49:18,725 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:49:18,725 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:49:18,725 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:49:18,725 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,725 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438158725,5,FailOnTimeoutGroup] 2024-11-24T08:49:18,726 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438158725,5,FailOnTimeoutGroup] 2024-11-24T08:49:18,726 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
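[Editor's note] Several entries above come from hbase.ChoreService(168), e.g. "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled." Below is a small, hedged sketch of that ScheduledChore/ChoreService pattern; the chore name, stopper, and chore body are made-up placeholders, while the 600000 ms period mirrors the value logged for LogsCleaner and HFileCleaner.

```java
// Sketch: scheduling a periodic chore the way the master schedules its cleaner chores.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal stopper; real servers pass the server instance itself.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Runs every 600000 ms; a real cleaner would prune oldWALs/archived HFiles in chore().
    ScheduledChore demoChore = new ScheduledChore("DemoCleaner", stopper, 600_000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService choreService = new ChoreService("demo-master");
    // ChoreService is what emits the "Chore ScheduledChore name=... is enabled." INFO lines.
    choreService.scheduleChore(demoChore);
    Thread.sleep(1_000);
    choreService.shutdown();
  }
}
```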
2024-11-24T08:49:18,725 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:49:18,726 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:49:18,726 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,726 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
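[Editor's note] One HMaster line above reports that reopening regions with a very high storeFileRefCount is disabled and that a threshold value > 0 for hbase.regions.recovery.store.file.ref.count enables it. A hedged configuration sketch follows; the key name is taken verbatim from that log line, and the value 3 is an arbitrary example, not a recommended setting.

```java
// Sketch: enabling the storeFileRefCount recovery chore via configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RefCountGuardSketch {
  public static Configuration withRefCountGuard() {
    Configuration conf = HBaseConfiguration.create();
    // > 0 enables the feature; with the default the master logs "... is disabled."
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    return conf;
  }
}
```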
2024-11-24T08:49:18,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:49:18,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:49:18,733 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:49:18,733 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55 2024-11-24T08:49:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:49:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:49:18,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:18,741 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:49:18,742 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:49:18,742 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:18,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:49:18,744 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:49:18,744 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:18,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:49:18,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:49:18,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:18,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:49:18,748 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:49:18,748 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:18,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:18,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:49:18,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740 2024-11-24T08:49:18,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740 2024-11-24T08:49:18,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:49:18,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:49:18,751 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
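The FlushLargeStoresPolicy DEBUG entry above reports that, with no "hbase.hregion.percolumnfamilyflush.size.lower.bound" in the hbase:meta descriptor, the per-family flush lower bound falls back to the region memstore flush size divided by the number of column families. A small arithmetic sketch of that fallback follows; the 64 MB flush size is inferred from the logged 16.0 M result and the four families of hbase:meta, so treat it as an assumption.

```java
public class FlushLowerBoundArithmetic {
  public static void main(String[] args) {
    // Fallback described in the log: region memstore flush size / number of families.
    long regionMemStoreFlushSize = 64L * 1024 * 1024; // assumed for illustration
    int columnFamilies = 4;                           // info, ns, rep_barrier, table
    long perFamilyLowerBound = regionMemStoreFlushSize / columnFamilies;
    System.out.println(perFamilyLowerBound); // 16777216, matching the logged 16.0 M
  }
}
```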
2024-11-24T08:49:18,752 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:49:18,754 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:49:18,755 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799635, jitterRate=0.016788557171821594}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:49:18,755 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732438158740Initializing all the Stores at 1732438158741 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438158741Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438158741Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438158741Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438158741Cleaning up temporary data from old regions at 1732438158751 (+10 ms)Region opened successfully at 1732438158755 (+4 ms) 2024-11-24T08:49:18,755 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:49:18,755 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:49:18,755 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:49:18,755 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:49:18,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:49:18,756 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:49:18,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438158755Disabling compacts and flushes for region at 1732438158755Disabling writes for close at 1732438158756 (+1 ms)Writing 
region close event to WAL at 1732438158756Closed at 1732438158756 2024-11-24T08:49:18,757 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:18,757 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:49:18,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:49:18,759 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:49:18,760 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:49:18,793 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(746): ClusterId : 40c5adcc-3023-410f-9ce9-cd57d84aeab1 2024-11-24T08:49:18,793 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:49:18,795 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:49:18,795 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:49:18,796 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:49:18,796 DEBUG [RS:0;469387a2cdb6:45991 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ffef769, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:49:18,807 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;469387a2cdb6:45991 2024-11-24T08:49:18,807 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:49:18,807 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:49:18,807 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:49:18,808 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,46577,1732438158530 with port=45991, startcode=1732438158575 2024-11-24T08:49:18,808 DEBUG [RS:0;469387a2cdb6:45991 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:49:18,810 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49743, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:49:18,811 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46577 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,45991,1732438158575 2024-11-24T08:49:18,811 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46577 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,45991,1732438158575 2024-11-24T08:49:18,813 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55 2024-11-24T08:49:18,813 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38481 2024-11-24T08:49:18,813 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:49:18,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:49:18,814 DEBUG [RS:0;469387a2cdb6:45991 {}] zookeeper.ZKUtil(111): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/469387a2cdb6,45991,1732438158575 2024-11-24T08:49:18,814 WARN [RS:0;469387a2cdb6:45991 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:49:18,815 INFO [RS:0;469387a2cdb6:45991 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:49:18,815 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575 2024-11-24T08:49:18,815 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,45991,1732438158575] 2024-11-24T08:49:18,818 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:49:18,819 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:49:18,820 INFO [RS:0;469387a2cdb6:45991 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:49:18,820 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
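The PressureAwareCompactionThroughputController line above logs a lower bound of 50.00 MB/second, a higher bound of 100.00 MB/second, and unlimited off-peak throughput. The sketch below shows one plausible reading of how such a controller can pick a limit by interpolating between the bounds as compaction pressure rises; it is an illustrative model of the logged bounds, not a transcript of the controller's actual code.

```java
public class CompactionThroughputInterpolationSketch {
  public static void main(String[] args) {
    // Interpolate between the logged bounds; once pressure reaches 1.0, stop throttling.
    double lowerBoundBytesPerSec = 50.0 * 1024 * 1024;
    double higherBoundBytesPerSec = 100.0 * 1024 * 1024;
    for (double pressure : new double[] {0.0, 0.5, 1.0}) {
      double limit = pressure >= 1.0
          ? Double.MAX_VALUE // effectively unthrottled under heavy pressure (assumed behavior)
          : lowerBoundBytesPerSec + (higherBoundBytesPerSec - lowerBoundBytesPerSec) * pressure;
      System.out.printf("pressure=%.1f limit=%.0f bytes/sec%n", pressure, limit);
    }
  }
}
```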
2024-11-24T08:49:18,820 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:49:18,821 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:49:18,821 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,821 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,822 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,822 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:18,822 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:49:18,822 DEBUG [RS:0;469387a2cdb6:45991 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:49:18,823 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:49:18,823 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,823 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,824 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,824 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,824 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,45991,1732438158575-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:49:18,839 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:49:18,839 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,45991,1732438158575-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,839 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,839 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.Replication(171): 469387a2cdb6,45991,1732438158575 started 2024-11-24T08:49:18,853 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:18,853 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,45991,1732438158575, RpcServer on 469387a2cdb6/172.17.0.2:45991, sessionid=0x10070ebc91e0001 2024-11-24T08:49:18,853 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:49:18,853 DEBUG [RS:0;469387a2cdb6:45991 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,45991,1732438158575 2024-11-24T08:49:18,853 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,45991,1732438158575' 2024-11-24T08:49:18,853 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:49:18,854 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:49:18,855 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:49:18,855 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:49:18,855 DEBUG [RS:0;469387a2cdb6:45991 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 469387a2cdb6,45991,1732438158575 2024-11-24T08:49:18,855 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,45991,1732438158575' 2024-11-24T08:49:18,855 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:49:18,855 DEBUG 
[RS:0;469387a2cdb6:45991 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:49:18,855 DEBUG [RS:0;469387a2cdb6:45991 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:49:18,855 INFO [RS:0;469387a2cdb6:45991 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:49:18,855 INFO [RS:0;469387a2cdb6:45991 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:49:18,910 WARN [469387a2cdb6:46577 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:49:18,959 INFO [RS:0;469387a2cdb6:45991 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C45991%2C1732438158575, suffix=, logDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575, archiveDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/oldWALs, maxLogs=32 2024-11-24T08:49:18,960 INFO [RS:0;469387a2cdb6:45991 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:18,969 INFO [RS:0;469387a2cdb6:45991 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:18,970 DEBUG [RS:0;469387a2cdb6:45991 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42055:42055),(127.0.0.1/127.0.0.1:42889:42889)] 2024-11-24T08:49:19,160 DEBUG [469387a2cdb6:46577 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:49:19,162 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=469387a2cdb6,45991,1732438158575 2024-11-24T08:49:19,165 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,45991,1732438158575, state=OPENING 2024-11-24T08:49:19,168 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:49:19,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:19,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:19,172 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:19,172 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:49:19,172 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:19,172 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,45991,1732438158575}] 2024-11-24T08:49:19,329 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:49:19,333 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60873, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:49:19,337 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:49:19,337 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:49:19,340 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C45991%2C1732438158575.meta, suffix=.meta, logDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575, archiveDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/oldWALs, maxLogs=32 2024-11-24T08:49:19,340 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta 2024-11-24T08:49:19,347 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta 2024-11-24T08:49:19,348 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42889:42889),(127.0.0.1/127.0.0.1:42055:42055)] 2024-11-24T08:49:19,349 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:49:19,349 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:49:19,349 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:49:19,349 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
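The WAL configuration entries above report blocksize=256 MB, rollsize=128 MB, and maxLogs=32, i.e. the WAL rolls at half a block, which is what the testLogRollOnPipelineRestart run later in this log exercises. The sketch below shows how such a roll size could be derived from a block size and a multiplier; the two configuration keys and the 0.5 factor are assumptions about where these values are usually configured, and only the 256 MB / 128 MB pair comes from the log itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSizeSketch {
  public static void main(String[] args) {
    // Derive a roll size as blockSize * multiplier; keys and defaults are assumed.
    Configuration conf = HBaseConfiguration.create();
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    long rollSize = (long) (blockSize * multiplier);
    System.out.println("blocksize=" + blockSize + " rollsize=" + rollSize);
  }
}
```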
2024-11-24T08:49:19,349 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:49:19,349 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:19,350 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:49:19,350 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:49:19,351 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:49:19,352 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:49:19,352 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:19,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:19,353 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:49:19,353 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:49:19,353 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:19,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:19,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:49:19,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:49:19,355 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:19,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:19,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:49:19,356 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:49:19,356 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:19,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:49:19,357 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:49:19,358 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740 2024-11-24T08:49:19,359 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740 2024-11-24T08:49:19,361 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:49:19,361 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:49:19,362 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:49:19,363 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:49:19,364 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759255, jitterRate=-0.034557923674583435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:49:19,365 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:49:19,365 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732438159350Writing region info on filesystem at 1732438159350Initializing all the Stores at 1732438159351 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438159351Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438159351Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438159351Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438159351Cleaning up temporary data from old regions at 1732438159361 (+10 ms)Running coprocessor post-open hooks at 1732438159365 (+4 ms)Region opened successfully at 1732438159365 2024-11-24T08:49:19,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:19,366 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732438159328 2024-11-24T08:49:19,369 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:49:19,369 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:49:19,370 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,45991,1732438158575 2024-11-24T08:49:19,371 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,45991,1732438158575, state=OPEN 2024-11-24T08:49:19,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:49:19,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:49:19,373 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=469387a2cdb6,45991,1732438158575 2024-11-24T08:49:19,373 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:19,373 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:19,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:49:19,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,45991,1732438158575 in 201 msec 2024-11-24T08:49:19,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:49:19,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 618 msec 2024-11-24T08:49:19,378 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:19,378 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:49:19,379 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:49:19,380 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,45991,1732438158575, seqNum=-1] 2024-11-24T08:49:19,380 DEBUG [PEWorker-2 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:49:19,381 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43069, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:49:19,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:19,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 666 msec 2024-11-24T08:49:19,387 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732438159387, completionTime=-1 2024-11-24T08:49:19,387 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:49:19,387 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-24T08:49:19,389 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:49:19,389 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732438219389 2024-11-24T08:49:19,389 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732438279389 2024-11-24T08:49:19,389 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:49:19,389 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,46577,1732438158530-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:19,389 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,46577,1732438158530-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:19,390 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,46577,1732438158530-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:19,390 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-469387a2cdb6:46577, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:19,390 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:19,391 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:19,392 DEBUG [master/469387a2cdb6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:49:19,394 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.788sec 2024-11-24T08:49:19,394 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:49:19,394 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:49:19,394 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:49:19,394 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:49:19,394 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:49:19,394 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,46577,1732438158530-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-24T08:49:19,395 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,46577,1732438158530-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-24T08:49:19,398 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-24T08:49:19,398 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-24T08:49:19,398 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,46577,1732438158530-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-24T08:49:19,495 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2caa9c5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-24T08:49:19,495 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 469387a2cdb6,46577,-1 for getting cluster id
2024-11-24T08:49:19,495 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-24T08:49:19,498 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '40c5adcc-3023-410f-9ce9-cd57d84aeab1'
2024-11-24T08:49:19,499 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-24T08:49:19,499 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "40c5adcc-3023-410f-9ce9-cd57d84aeab1"
2024-11-24T08:49:19,499 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d85c27e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-24T08:49:19,499 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [469387a2cdb6,46577,-1]
2024-11-24T08:49:19,500 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-24T08:49:19,500 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-24T08:49:19,502 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-24T08:49:19,503 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fa19949, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-24T08:49:19,504 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-24T08:49:19,505 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,45991,1732438158575, seqNum=-1]
2024-11-24T08:49:19,505 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-24T08:49:19,508 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42360, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-24T08:49:19,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=469387a2cdb6,46577,1732438158530
2024-11-24T08:49:19,511 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T08:49:19,515 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-24T08:49:19,515 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-11-24T08:49:19,515 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-11-24T08:49:19,515 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-24T08:49:19,517 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 469387a2cdb6,46577,1732438158530
2024-11-24T08:49:19,517 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@67d03189
2024-11-24T08:49:19,517 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-24T08:49:19,519 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42544, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-24T08:49:19,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46577 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-24T08:49:19,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46577 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
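The two TableDescriptorChecker warnings are expected here: the region max file size (786432 bytes) and memstore flush size (8192 bytes) are far below the production defaults, presumably shrunk by the test so that flushes and log rolls happen almost immediately. Below is a sketch of the kind of configuration that would produce exactly these warnings; the values are the ones echoed in the log, but this illustrates the settings only and is not TestLogRolling's actual setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Tiny limits compared with the defaults (~10 GB max file size, 128 MB flush size),
    // so writes flush and regions become split candidates after only a few rows.
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
    System.out.println(conf.get("hbase.hregion.max.filesize"));
  }
}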
2024-11-24T08:49:19,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:49:19,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46577 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T08:49:19,522 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:49:19,522 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:19,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-24T08:49:19,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:49:19,523 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:49:19,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741835_1011 (size=395) 2024-11-24T08:49:19,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741835_1011 (size=395) 2024-11-24T08:49:19,531 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => df982e828e0c6e38afe8e86e86925bed, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55 2024-11-24T08:49:19,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46299 is added to blk_1073741836_1012 (size=78) 2024-11-24T08:49:19,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36127 is added to blk_1073741836_1012 (size=78) 2024-11-24T08:49:19,538 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:19,538 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing df982e828e0c6e38afe8e86e86925bed, disabling compactions & flushes 2024-11-24T08:49:19,538 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:19,538 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:19,538 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. after waiting 0 ms 2024-11-24T08:49:19,538 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:19,538 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:19,538 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for df982e828e0c6e38afe8e86e86925bed: Waiting for close lock at 1732438159538Disabling compacts and flushes for region at 1732438159538Disabling writes for close at 1732438159538Writing region close event to WAL at 1732438159538Closed at 1732438159538 2024-11-24T08:49:19,540 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:49:19,540 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732438159540"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438159540"}]},"ts":"1732438159540"} 2024-11-24T08:49:19,542 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
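pid=4 above is the server-side CreateTableProcedure for 'TestLogRolling-testLogRollOnPipelineRestart' with a single 'info' family; the attribute list the master echoes back (BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)', VERSIONS => '1', and so on) is simply the defaults. As a hedged sketch, the client-side call that results in such a procedure looks roughly like the following, using the public TableDescriptorBuilder/Admin API rather than whatever test helper is actually used here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // One 'info' family; setMaxVersions(1) is explicit only to mirror the
      // VERSIONS => '1' shown in the descriptor, everything else is left at defaults.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          .build();
      // createTable waits for the create-table procedure (pid=4 above) to complete,
      // which is why the RPC handler is seen polling "is procedure done pid=4".
      admin.createTable(desc);
    }
  }
}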
2024-11-24T08:49:19,544 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:49:19,544 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438159544"}]},"ts":"1732438159544"} 2024-11-24T08:49:19,546 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-24T08:49:19,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=df982e828e0c6e38afe8e86e86925bed, ASSIGN}] 2024-11-24T08:49:19,548 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=df982e828e0c6e38afe8e86e86925bed, ASSIGN 2024-11-24T08:49:19,549 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=df982e828e0c6e38afe8e86e86925bed, ASSIGN; state=OFFLINE, location=469387a2cdb6,45991,1732438158575; forceNewPlan=false, retain=false 2024-11-24T08:49:19,701 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=df982e828e0c6e38afe8e86e86925bed, regionState=OPENING, regionLocation=469387a2cdb6,45991,1732438158575 2024-11-24T08:49:19,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=df982e828e0c6e38afe8e86e86925bed, ASSIGN because future has completed 2024-11-24T08:49:19,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure df982e828e0c6e38afe8e86e86925bed, server=469387a2cdb6,45991,1732438158575}] 2024-11-24T08:49:19,870 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 
2024-11-24T08:49:19,871 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => df982e828e0c6e38afe8e86e86925bed, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:49:19,871 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,871 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:19,871 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,871 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,873 INFO [StoreOpener-df982e828e0c6e38afe8e86e86925bed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,875 INFO [StoreOpener-df982e828e0c6e38afe8e86e86925bed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region df982e828e0c6e38afe8e86e86925bed columnFamilyName info 2024-11-24T08:49:19,875 DEBUG [StoreOpener-df982e828e0c6e38afe8e86e86925bed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:19,876 INFO [StoreOpener-df982e828e0c6e38afe8e86e86925bed-1 {}] regionserver.HStore(327): Store=df982e828e0c6e38afe8e86e86925bed/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:19,876 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,877 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,878 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,879 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,879 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,881 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,883 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:49:19,884 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened df982e828e0c6e38afe8e86e86925bed; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721610, jitterRate=-0.08242611587047577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:49:19,884 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:19,884 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for df982e828e0c6e38afe8e86e86925bed: Running coprocessor pre-open hook at 1732438159871Writing region info on filesystem at 1732438159871Initializing all the Stores at 1732438159872 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438159872Cleaning up temporary data from old regions at 1732438159879 (+7 ms)Running coprocessor post-open hooks at 1732438159884 (+5 ms)Region opened successfully at 1732438159884 2024-11-24T08:49:19,885 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed., pid=6, masterSystemTime=1732438159866 2024-11-24T08:49:19,887 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:19,887 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:19,888 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=df982e828e0c6e38afe8e86e86925bed, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,45991,1732438158575 2024-11-24T08:49:19,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure df982e828e0c6e38afe8e86e86925bed, server=469387a2cdb6,45991,1732438158575 because future has completed 2024-11-24T08:49:19,891 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46577 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=469387a2cdb6,45991,1732438158575, table=TestLogRolling-testLogRollOnPipelineRestart, region=df982e828e0c6e38afe8e86e86925bed. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-24T08:49:19,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:49:19,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure df982e828e0c6e38afe8e86e86925bed, server=469387a2cdb6,45991,1732438158575 in 182 msec 2024-11-24T08:49:19,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:49:19,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=df982e828e0c6e38afe8e86e86925bed, ASSIGN in 349 msec 2024-11-24T08:49:19,898 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:49:19,898 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438159898"}]},"ts":"1732438159898"} 2024-11-24T08:49:19,900 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-24T08:49:19,901 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:49:19,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 383 msec 2024-11-24T08:49:20,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:20,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:21,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:21,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:22,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:22,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:23,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:23,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:24,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:24,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:24,858 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:49:24,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:24,894 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:49:24,895 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-24T08:49:25,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:49:25,318 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:49:25,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T08:49:25,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-24T08:49:25,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:49:25,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:49:25,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:25,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:26,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:26,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:27,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:27,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:28,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:28,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:29,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:29,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:49:29,618 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-24T08:49:29,619 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-24T08:49:29,626 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T08:49:29,626 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:29,631 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed., hostname=469387a2cdb6,45991,1732438158575, seqNum=2] 2024-11-24T08:49:30,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:30,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:31,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:31,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:31,634 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:31,635 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:31,635 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:31,635 WARN [DataStreamer for file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 block BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK], DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]) is bad. 2024-11-24T08:49:31,635 WARN [DataStreamer for file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 block BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK], DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]) is bad. 2024-11-24T08:49:31,635 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:31,635 WARN [PacketResponder: BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46299] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:53480 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46299:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53480 dst: /127.0.0.1:46299 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,636 WARN [DataStreamer for file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta block BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK], DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46299,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]) is bad. 
2024-11-24T08:49:31,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1414158947_22 at /127.0.0.1:57314 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57314 dst: /127.0.0.1:36127 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,636 WARN [PacketResponder: BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46299] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:49:31,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:46022 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46022 dst: /127.0.0.1:36127 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:46026 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46026 dst: /127.0.0.1:36127 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:49:31,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1414158947_22 at /127.0.0.1:46246 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46299:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46246 dst: /127.0.0.1:46299 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,637 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:53482 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46299:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53482 dst: /127.0.0.1:46299 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@182fe99a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:31,638 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3df8f3f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:31,638 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:31,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0a81cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:31,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e024519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:31,640 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:31,640 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:49:31,640 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-590959869-172.17.0.2-1732438157948 (Datanode Uuid 4ab5b28e-fd22-45e1-bb99-28e8f51d553b) service to localhost/127.0.0.1:38481 2024-11-24T08:49:31,640 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:31,641 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data3/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:31,641 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data4/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:31,641 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:31,655 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:31,659 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:31,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:31,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:31,659 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:49:31,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@445ec646{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:31,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c6bf72b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:31,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28a46d74{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir/jetty-localhost-36693-hadoop-hdfs-3_4_1-tests_jar-_-any-7872713173165770210/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:31,767 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30643319{HTTP/1.1, 
(http/1.1)}{localhost:36693} 2024-11-24T08:49:31,767 INFO [Time-limited test {}] server.Server(415): Started @162384ms 2024-11-24T08:49:31,768 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:49:31,788 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:31,788 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:31,788 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:31,789 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1414158947_22 at /127.0.0.1:43660 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43660 dst: /127.0.0.1:36127 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,789 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:43648 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43648 dst: /127.0.0.1:36127 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:31,789 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:43646 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43646 dst: /127.0.0.1:36127 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:49:31,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bc967ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:31,799 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16962f7c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:31,799 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:31,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3409b3ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:31,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@559a021a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:31,800 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:31,800 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:49:31,800 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-590959869-172.17.0.2-1732438157948 (Datanode Uuid 7a87b268-3c7d-4052-ae7d-66aa3b889c69) service to localhost/127.0.0.1:38481 2024-11-24T08:49:31,800 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:31,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data1/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:31,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data2/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:31,801 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:31,815 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:31,820 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:31,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:31,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:31,825 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:49:31,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22d58dc9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:31,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f8b505d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:31,862 WARN [Thread-1333 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:49:31,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb0c8e4b6e546747 with lease ID 0x8f522ae550721696: from storage DS-3567303f-bbfc-400b-9af1-2c28f5ff7090 node DatanodeRegistration(127.0.0.1:39577, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=33683, infoSecurePort=0, ipcPort=44079, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:31,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb0c8e4b6e546747 with lease ID 0x8f522ae550721696: from storage DS-7f14f266-0fe7-4a57-8405-e7ed6b9a0f23 node DatanodeRegistration(127.0.0.1:39577, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=33683, infoSecurePort=0, ipcPort=44079, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:31,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4acdf448{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir/jetty-localhost-45411-hadoop-hdfs-3_4_1-tests_jar-_-any-12729391715078473381/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:31,933 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f1f6b88{HTTP/1.1, (http/1.1)}{localhost:45411} 2024-11-24T08:49:31,933 INFO [Time-limited test {}] server.Server(415): Started @162550ms 2024-11-24T08:49:31,934 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:49:32,036 WARN [Thread-1364 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:49:32,039 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1274c8247078d1af with lease ID 0x8f522ae550721697: from storage DS-bce299a1-4174-4a22-b417-838e45f8cdbb node DatanodeRegistration(127.0.0.1:40719, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=37977, infoSecurePort=0, ipcPort=36677, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:32,039 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1274c8247078d1af with lease ID 0x8f522ae550721697: from storage DS-a1487033-4466-419b-9411-eb283096a660 node DatanodeRegistration(127.0.0.1:40719, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=37977, infoSecurePort=0, ipcPort=36677, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:32,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:32,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:32,951 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-24T08:49:32,955 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-24T08:49:32,957 ERROR [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:32,957 WARN [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T08:49:32,958 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C45991%2C1732438158575:(num 1732438158960) roll requested
2024-11-24T08:49:32,958 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45991%2C1732438158575.1732438172958
2024-11-24T08:49:32,964 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 newFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958
2024-11-24T08:49:32,964 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:49:32,964 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:49:32,965 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:49:32,965 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:49:32,965 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:49:32,965 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958
2024-11-24T08:49:32,966 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T08:49:32,966 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:32,967 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:32,967 WARN [IPC Server handler 3 on default port 38481 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-24T08:49:32,967 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 after 0ms 2024-11-24T08:49:32,969 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33683:33683),(127.0.0.1/127.0.0.1:37977:37977)] 2024-11-24T08:49:32,969 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 is not closed yet, will try archiving it next time 2024-11-24T08:49:33,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:33,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:33,866 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T08:49:34,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:34,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:34,972 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-24T08:49:35,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:35,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:36,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:36,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:36,969 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 after 4002ms 2024-11-24T08:49:36,975 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:40719,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:36,976 WARN [DataStreamer for file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 block BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39577,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK], DatanodeInfoWithStorage[127.0.0.1:40719,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40719,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]) is bad. 
2024-11-24T08:49:36,976 WARN [PacketResponder: BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40719] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T08:49:36,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:52308 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52308 dst: /127.0.0.1:39577
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T08:49:36,978 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:52730 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52730 dst: /127.0.0.1:40719
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T08:49:36,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4acdf448{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:36,980 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f1f6b88{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:36,980 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:36,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f8b505d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:36,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22d58dc9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:36,982 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:49:36,982 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:36,982 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:36,982 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-590959869-172.17.0.2-1732438157948 (Datanode Uuid 7a87b268-3c7d-4052-ae7d-66aa3b889c69) service to localhost/127.0.0.1:38481 2024-11-24T08:49:36,983 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data1/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:36,983 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data2/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:36,983 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:36,992 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:36,995 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:36,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:36,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:36,996 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:49:36,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79c2c9b7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:36,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1543fd34{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:37,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b5d2e48{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir/jetty-localhost-40331-hadoop-hdfs-3_4_1-tests_jar-_-any-811618750546224303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:37,091 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76ee45be{HTTP/1.1, (http/1.1)}{localhost:40331} 2024-11-24T08:49:37,091 INFO [Time-limited test {}] server.Server(415): Started @167708ms 2024-11-24T08:49:37,092 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:49:37,107 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:37,107 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_475397286_22 at /127.0.0.1:52324 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52324 dst: /127.0.0.1:39577 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:37,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28a46d74{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:37,110 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30643319{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:37,110 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:37,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c6bf72b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:37,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@445ec646{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:37,112 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:49:37,112 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:37,112 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:37,112 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-590959869-172.17.0.2-1732438157948 (Datanode Uuid 4ab5b28e-fd22-45e1-bb99-28e8f51d553b) service to localhost/127.0.0.1:38481 2024-11-24T08:49:37,112 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data4/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:37,113 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data3/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:37,113 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:37,124 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:37,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:37,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:37,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:37,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:49:37,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18e7d289{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:37,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4da55ed8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:37,159 WARN [Thread-1407 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:49:37,161 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f92e3a8e6f04e49 with lease ID 0x8f522ae550721698: from storage DS-bce299a1-4174-4a22-b417-838e45f8cdbb node DatanodeRegistration(127.0.0.1:42351, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=35561, infoSecurePort=0, ipcPort=35229, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:37,161 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f92e3a8e6f04e49 with lease ID 0x8f522ae550721698: from storage DS-a1487033-4466-419b-9411-eb283096a660 node DatanodeRegistration(127.0.0.1:42351, datanodeUuid=7a87b268-3c7d-4052-ae7d-66aa3b889c69, infoPort=35561, infoSecurePort=0, ipcPort=35229, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:49:37,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4856962{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/java.io.tmpdir/jetty-localhost-46771-hadoop-hdfs-3_4_1-tests_jar-_-any-18314568763744433799/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:37,230 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17ea7577{HTTP/1.1, (http/1.1)}{localhost:46771} 2024-11-24T08:49:37,230 INFO [Time-limited test {}] server.Server(415): Started @167847ms 2024-11-24T08:49:37,232 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:49:37,300 WARN [Thread-1438 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:49:37,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e6649cba1673d4a with lease ID 0x8f522ae550721699: from storage DS-3567303f-bbfc-400b-9af1-2c28f5ff7090 node DatanodeRegistration(127.0.0.1:39937, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=38047, infoSecurePort=0, ipcPort=41171, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:37,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e6649cba1673d4a with lease ID 0x8f522ae550721699: from storage DS-7f14f266-0fe7-4a57-8405-e7ed6b9a0f23 node DatanodeRegistration(127.0.0.1:39937, datanodeUuid=4ab5b28e-fd22-45e1-bb99-28e8f51d553b, infoPort=38047, infoSecurePort=0, ipcPort=41171, storageInfo=lv=-57;cid=testClusterID;nsid=479861250;c=1732438157948), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:37,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:37,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:38,248 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-24T08:49:38,254 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-24T08:49:38,256 ERROR [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39577,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:38,256 WARN [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39577,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:38,257 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C45991%2C1732438158575:(num 1732438172958) roll requested 2024-11-24T08:49:38,257 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45991%2C1732438158575.1732438178257 2024-11-24T08:49:38,264 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 newFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 2024-11-24T08:49:38,264 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:38,264 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:38,264 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:38,264 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:38,264 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:38,265 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 2024-11-24T08:49:38,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39577,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:38,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39577,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:38,265 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 2024-11-24T08:49:38,266 WARN [IPC Server handler 0 on default port 38481 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-24T08:49:38,266 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35561:35561),(127.0.0.1/127.0.0.1:38047:38047)] 2024-11-24T08:49:38,266 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 is not closed yet, will try archiving it next time 2024-11-24T08:49:38,266 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 after 1ms 2024-11-24T08:49:38,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:38,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:39,162 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T08:49:39,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:39,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:40,268 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:40,279 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 newFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:40,280 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:40,280 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:40,280 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:40,281 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:40,281 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:40,281 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:40,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38047:38047),(127.0.0.1/127.0.0.1:35561:35561)] 2024-11-24T08:49:40,284 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 is not closed yet, will try archiving it next time 2024-11-24T08:49:40,284 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 is not closed yet, will try archiving it next time 2024-11-24T08:49:40,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741838_1019 (size=1264) 2024-11-24T08:49:40,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741838_1019 (size=1264) 2024-11-24T08:49:40,284 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:40,284 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:40,285 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 after 1ms 2024-11-24T08:49:40,285 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): 
hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 is not closed yet, will try archiving it next time 2024-11-24T08:49:40,285 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:40,294 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732438159884/Put/vlen=218/seqid=0] 2024-11-24T08:49:40,294 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732438169632/Put/vlen=1045/seqid=0] 2024-11-24T08:49:40,294 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438158960 2024-11-24T08:49:40,294 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 2024-11-24T08:49:40,294 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 2024-11-24T08:49:40,295 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 after 1ms 2024-11-24T08:49:40,295 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 2024-11-24T08:49:40,298 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732438172957/Put/vlen=1045/seqid=0] 2024-11-24T08:49:40,298 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732438174973/Put/vlen=1045/seqid=0] 2024-11-24T08:49:40,298 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 2024-11-24T08:49:40,298 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 2024-11-24T08:49:40,298 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 2024-11-24T08:49:40,299 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 after 1ms 2024-11-24T08:49:40,299 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading 
WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438178257 2024-11-24T08:49:40,302 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732438178256/Put/vlen=1045/seqid=0] 2024-11-24T08:49:40,302 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:40,303 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:40,303 WARN [IPC Server handler 4 on default port 38481 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-24T08:49:40,303 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 after 0ms 2024-11-24T08:49:40,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:40,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:41,170 WARN [ResponseProcessor for block BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:41,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1414158947_22 at /127.0.0.1:60078 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60078 dst: /127.0.0.1:39937 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39937 remote=/127.0.0.1:60078]. Total timeout mills is 60000, 59109 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:41,170 WARN [DataStreamer for file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 block BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39937,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK], DatanodeInfoWithStorage[127.0.0.1:42351,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39937,DS-3567303f-bbfc-400b-9af1-2c28f5ff7090,DISK]) is bad. 
2024-11-24T08:49:41,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1414158947_22 at /127.0.0.1:59984 [Receiving block BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42351:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59984 dst: /127.0.0.1:42351 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:49:41,176 WARN [DataStreamer for file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 block BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741839_1022 (size=85) 2024-11-24T08:49:41,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:41,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:42,268 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438172958 after 4003ms 2024-11-24T08:49:42,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:42,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:43,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:43,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:44,305 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 after 4002ms 2024-11-24T08:49:44,305 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:44,314 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:44,314 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing df982e828e0c6e38afe8e86e86925bed 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-24T08:49:44,315 ERROR [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:44,315 WARN [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:44,315 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C45991%2C1732438158575:(num 1732438180268) roll requested 2024-11-24T08:49:44,316 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45991%2C1732438158575.1732438184316 2024-11-24T08:49:44,321 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 newFile=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438184316 2024-11-24T08:49:44,321 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,321 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,321 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438184316 2024-11-24T08:49:44,322 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:44,322 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-590959869-172.17.0.2-1732438157948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:44,322 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:44,323 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 after 1ms 2024-11-24T08:49:44,325 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.1732438180268 to hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/oldWALs/469387a2cdb6%2C45991%2C1732438158575.1732438180268 2024-11-24T08:49:44,325 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38047:38047),(127.0.0.1/127.0.0.1:35561:35561)] 2024-11-24T08:49:44,345 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed/.tmp/info/4d7c6699aaa045dca73189315e8f40a4 is 1080, key is row1002/info:/1732438169632/Put/seqid=0 2024-11-24T08:49:44,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741841_1024 (size=9270) 2024-11-24T08:49:44,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741841_1024 (size=9270) 2024-11-24T08:49:44,350 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed/.tmp/info/4d7c6699aaa045dca73189315e8f40a4 2024-11-24T08:49:44,357 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed/.tmp/info/4d7c6699aaa045dca73189315e8f40a4 as hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed/info/4d7c6699aaa045dca73189315e8f40a4 2024-11-24T08:49:44,362 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed/info/4d7c6699aaa045dca73189315e8f40a4, entries=4, sequenceid=8, filesize=9.1 K 2024-11-24T08:49:44,363 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for df982e828e0c6e38afe8e86e86925bed in 49ms, sequenceid=8, compaction requested=false 2024-11-24T08:49:44,363 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for df982e828e0c6e38afe8e86e86925bed: 2024-11-24T08:49:44,363 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-24T08:49:44,363 ERROR [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:44,364 WARN [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55-prefix:469387a2cdb6,45991,1732438158575.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:44,364 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C45991%2C1732438158575.meta:.meta(num 1732438159340) roll requested 2024-11-24T08:49:44,364 INFO [regionserver/469387a2cdb6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45991%2C1732438158575.meta.1732438184364.meta 2024-11-24T08:49:44,369 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,369 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,369 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,369 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,369 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,369 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438184364.meta 2024-11-24T08:49:44,369 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:44,369 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:49:44,370 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta 2024-11-24T08:49:44,370 WARN [IPC Server handler 2 on default port 38481 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-24T08:49:44,370 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta after 0ms 2024-11-24T08:49:44,370 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38047:38047),(127.0.0.1/127.0.0.1:35561:35561)] 2024-11-24T08:49:44,370 DEBUG [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta is not closed yet, will try archiving it next time 2024-11-24T08:49:44,387 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/info/743647cff39c448ea7e51cd2b4bd2f70 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed./info:regioninfo/1732438159888/Put/seqid=0 2024-11-24T08:49:44,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741843_1027 (size=7125) 2024-11-24T08:49:44,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741843_1027 (size=7125) 2024-11-24T08:49:44,392 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/info/743647cff39c448ea7e51cd2b4bd2f70 2024-11-24T08:49:44,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:44,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:44,411 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/ns/07682c1f56c14c3788c98493dd9ed1db is 43, key is default/ns:d/1732438159382/Put/seqid=0 2024-11-24T08:49:44,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741844_1028 (size=5153) 2024-11-24T08:49:44,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741844_1028 (size=5153) 2024-11-24T08:49:44,417 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/ns/07682c1f56c14c3788c98493dd9ed1db 2024-11-24T08:49:44,434 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/table/726ab54b208646708d269ceb139edd94 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732438159898/Put/seqid=0 2024-11-24T08:49:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741845_1029 (size=5438) 2024-11-24T08:49:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741845_1029 (size=5438) 2024-11-24T08:49:44,440 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/table/726ab54b208646708d269ceb139edd94 2024-11-24T08:49:44,446 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/info/743647cff39c448ea7e51cd2b4bd2f70 as hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/info/743647cff39c448ea7e51cd2b4bd2f70 2024-11-24T08:49:44,451 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/info/743647cff39c448ea7e51cd2b4bd2f70, entries=10, sequenceid=11, filesize=7.0 K 2024-11-24T08:49:44,452 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/ns/07682c1f56c14c3788c98493dd9ed1db as 
hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/ns/07682c1f56c14c3788c98493dd9ed1db 2024-11-24T08:49:44,458 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/ns/07682c1f56c14c3788c98493dd9ed1db, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T08:49:44,459 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/.tmp/table/726ab54b208646708d269ceb139edd94 as hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/table/726ab54b208646708d269ceb139edd94 2024-11-24T08:49:44,464 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/table/726ab54b208646708d269ceb139edd94, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T08:49:44,465 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-24T08:49:44,465 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T08:49:44,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:49:44,470 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:49:44,470 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:49:44,470 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:44,470 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:44,470 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:49:44,470 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:49:44,470 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=711113781, stopped=false 2024-11-24T08:49:44,470 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=469387a2cdb6,46577,1732438158530 2024-11-24T08:49:44,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:44,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:44,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:44,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:44,471 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:49:44,472 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:49:44,472 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:49:44,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:44,472 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:44,472 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,45991,1732438158575' ***** 2024-11-24T08:49:44,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:44,472 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:49:44,472 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:49:44,472 INFO [RS:0;469387a2cdb6:45991 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:49:44,472 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:49:44,472 INFO [RS:0;469387a2cdb6:45991 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:49:44,472 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(3091): Received CLOSE for df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,45991,1732438158575 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;469387a2cdb6:45991. 2024-11-24T08:49:44,473 DEBUG [RS:0;469387a2cdb6:45991 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing df982e828e0c6e38afe8e86e86925bed, disabling compactions & flushes 2024-11-24T08:49:44,473 DEBUG [RS:0;469387a2cdb6:45991 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:44,473 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. after waiting 0 ms 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:49:44,473 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T08:49:44,473 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1325): Online Regions={df982e828e0c6e38afe8e86e86925bed=TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:49:44,473 DEBUG [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, df982e828e0c6e38afe8e86e86925bed 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:49:44,473 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:49:44,473 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:49:44,478 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T08:49:44,478 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/data/default/TestLogRolling-testLogRollOnPipelineRestart/df982e828e0c6e38afe8e86e86925bed/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-24T08:49:44,478 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:49:44,478 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:49:44,478 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:44,478 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438184473Running coprocessor pre-close hooks at 1732438184473Disabling compacts and flushes for region at 1732438184473Disabling writes for close at 1732438184473Writing region close event to WAL at 1732438184474 (+1 ms)Running coprocessor post-close hooks at 1732438184478 (+4 ms)Closed at 1732438184478 2024-11-24T08:49:44,478 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for df982e828e0c6e38afe8e86e86925bed: Waiting for close lock at 1732438184473Running coprocessor pre-close hooks at 1732438184473Disabling compacts and flushes for region at 1732438184473Disabling writes for close at 1732438184473Writing region close event to WAL at 1732438184473Running coprocessor post-close hooks at 1732438184478 (+5 ms)Closed at 1732438184478 2024-11-24T08:49:44,478 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:49:44,478 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732438159519.df982e828e0c6e38afe8e86e86925bed. 2024-11-24T08:49:44,674 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,45991,1732438158575; all regions closed. 
2024-11-24T08:49:44,674 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,674 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,675 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,675 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,675 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:44,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741842_1025 (size=825) 2024-11-24T08:49:44,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741842_1025 (size=825) 2024-11-24T08:49:44,826 INFO [regionserver/469387a2cdb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:49:44,850 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:49:44,850 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:49:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:49:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:49:45,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T08:49:45,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:45,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:46,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:46,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:47,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:47,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:48,307 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-24T08:49:48,372 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta after 4002ms 2024-11-24T08:49:48,373 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/WALs/469387a2cdb6,45991,1732438158575/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta to hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/oldWALs/469387a2cdb6%2C45991%2C1732438158575.meta.1732438159340.meta 2024-11-24T08:49:48,380 DEBUG [RS:0;469387a2cdb6:45991 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/oldWALs 2024-11-24T08:49:48,380 INFO [RS:0;469387a2cdb6:45991 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C45991%2C1732438158575.meta:.meta(num 1732438184364) 2024-11-24T08:49:48,380 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,381 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,381 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,381 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741840_1023 (size=1162) 2024-11-24T08:49:48,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741840_1023 (size=1162) 2024-11-24T08:49:48,388 DEBUG [RS:0;469387a2cdb6:45991 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/oldWALs 2024-11-24T08:49:48,388 INFO [RS:0;469387a2cdb6:45991 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C45991%2C1732438158575:(num 1732438184316) 2024-11-24T08:49:48,388 DEBUG [RS:0;469387a2cdb6:45991 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:48,388 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:49:48,388 INFO [RS:0;469387a2cdb6:45991 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:49:48,389 INFO [RS:0;469387a2cdb6:45991 {}] hbase.ChoreService(370): Chore service for: regionserver/469387a2cdb6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T08:49:48,389 INFO [RS:0;469387a2cdb6:45991 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:49:48,389 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:49:48,389 INFO [RS:0;469387a2cdb6:45991 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45991 2024-11-24T08:49:48,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,45991,1732438158575 2024-11-24T08:49:48,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:49:48,390 INFO [RS:0;469387a2cdb6:45991 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:49:48,391 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,45991,1732438158575] 2024-11-24T08:49:48,392 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,45991,1732438158575 already deleted, retry=false 2024-11-24T08:49:48,392 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,45991,1732438158575 expired; onlineServers=0 2024-11-24T08:49:48,392 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '469387a2cdb6,46577,1732438158530' ***** 2024-11-24T08:49:48,392 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:49:48,392 INFO [M:0;469387a2cdb6:46577 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:49:48,393 INFO [M:0;469387a2cdb6:46577 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:49:48,393 DEBUG [M:0;469387a2cdb6:46577 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:49:48,393 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:49:48,393 DEBUG [M:0;469387a2cdb6:46577 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:49:48,393 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438158725 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438158725,5,FailOnTimeoutGroup] 2024-11-24T08:49:48,393 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438158725 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438158725,5,FailOnTimeoutGroup] 2024-11-24T08:49:48,393 INFO [M:0;469387a2cdb6:46577 {}] hbase.ChoreService(370): Chore service for: master/469387a2cdb6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:49:48,393 INFO [M:0;469387a2cdb6:46577 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:49:48,393 DEBUG [M:0;469387a2cdb6:46577 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:49:48,393 INFO [M:0;469387a2cdb6:46577 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:49:48,393 INFO [M:0;469387a2cdb6:46577 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:49:48,393 INFO [M:0;469387a2cdb6:46577 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:49:48,393 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:49:48,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:49:48,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:48,394 DEBUG [M:0;469387a2cdb6:46577 {}] zookeeper.ZKUtil(347): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:49:48,394 WARN [M:0;469387a2cdb6:46577 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:49:48,395 INFO [M:0;469387a2cdb6:46577 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/.lastflushedseqids 2024-11-24T08:49:48,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:48,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741846_1030 (size=130) 2024-11-24T08:49:48,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741846_1030 (size=130) 2024-11-24T08:49:48,400 INFO [M:0;469387a2cdb6:46577 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:49:48,400 INFO [M:0;469387a2cdb6:46577 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:49:48,400 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:49:48,400 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:48,400 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:48,400 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:49:48,400 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:49:48,400 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-24T08:49:48,401 ERROR [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData-prefix:469387a2cdb6,46577,1732438158530 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:48,401 WARN [FSHLog-0-hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData-prefix:469387a2cdb6,46577,1732438158530 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:48,401 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 469387a2cdb6%2C46577%2C1732438158530:(num 1732438158660) roll requested 2024-11-24T08:49:48,401 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C46577%2C1732438158530.1732438188401 2024-11-24T08:49:48,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,406 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,406 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,406 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,406 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,406 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438188401 2024-11-24T08:49:48,407 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:48,408 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36127,DS-bce299a1-4174-4a22-b417-838e45f8cdbb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:49:48,408 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 2024-11-24T08:49:48,408 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38047:38047),(127.0.0.1/127.0.0.1:35561:35561)] 2024-11-24T08:49:48,408 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 is not closed yet, will try archiving it next time 2024-11-24T08:49:48,408 WARN [IPC Server handler 4 on default port 38481 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-24T08:49:48,408 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 after 0ms 2024-11-24T08:49:48,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:48,424 DEBUG [M:0;469387a2cdb6:46577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17b9277012d149479880db9b3c173b9c is 82, key is hbase:meta,,1/info:regioninfo/1732438159370/Put/seqid=0 2024-11-24T08:49:48,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741848_1033 (size=5672) 2024-11-24T08:49:48,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741848_1033 (size=5672) 2024-11-24T08:49:48,429 INFO [M:0;469387a2cdb6:46577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17b9277012d149479880db9b3c173b9c 2024-11-24T08:49:48,447 DEBUG [M:0;469387a2cdb6:46577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c9a3da2a74d44d5b07057202f9c4dfe is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732438159903/Put/seqid=0 2024-11-24T08:49:48,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741849_1034 (size=6119) 2024-11-24T08:49:48,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741849_1034 (size=6119) 2024-11-24T08:49:48,452 INFO [M:0;469387a2cdb6:46577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c9a3da2a74d44d5b07057202f9c4dfe 2024-11-24T08:49:48,470 DEBUG [M:0;469387a2cdb6:46577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/58e625d10b5e4af28d173904006858e8 is 69, key is 469387a2cdb6,45991,1732438158575/rs:state/1732438158811/Put/seqid=0 2024-11-24T08:49:48,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741850_1035 (size=5156) 2024-11-24T08:49:48,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741850_1035 (size=5156) 2024-11-24T08:49:48,477 INFO [M:0;469387a2cdb6:46577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/58e625d10b5e4af28d173904006858e8 2024-11-24T08:49:48,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:48,492 INFO [RS:0;469387a2cdb6:45991 {}] 
hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:49:48,492 INFO [RS:0;469387a2cdb6:45991 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,45991,1732438158575; zookeeper connection closed. 2024-11-24T08:49:48,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45991-0x10070ebc91e0001, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:49:48,492 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6d0b764c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6d0b764c 2024-11-24T08:49:48,492 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:49:48,496 DEBUG [M:0;469387a2cdb6:46577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d19435d61afa43209afbdad903addd82 is 52, key is load_balancer_on/state:d/1732438159513/Put/seqid=0 2024-11-24T08:49:48,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741851_1036 (size=5056) 2024-11-24T08:49:48,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741851_1036 (size=5056) 2024-11-24T08:49:48,501 INFO [M:0;469387a2cdb6:46577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d19435d61afa43209afbdad903addd82 2024-11-24T08:49:48,506 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17b9277012d149479880db9b3c173b9c as hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17b9277012d149479880db9b3c173b9c 2024-11-24T08:49:48,509 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-24T08:49:48,511 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17b9277012d149479880db9b3c173b9c, entries=8, sequenceid=56, filesize=5.5 K 2024-11-24T08:49:48,512 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c9a3da2a74d44d5b07057202f9c4dfe as hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c9a3da2a74d44d5b07057202f9c4dfe 2024-11-24T08:49:48,517 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c9a3da2a74d44d5b07057202f9c4dfe, entries=6, sequenceid=56, filesize=6.0 K 2024-11-24T08:49:48,518 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/58e625d10b5e4af28d173904006858e8 as hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/58e625d10b5e4af28d173904006858e8 2024-11-24T08:49:48,524 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/58e625d10b5e4af28d173904006858e8, entries=1, sequenceid=56, filesize=5.0 K 2024-11-24T08:49:48,525 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d19435d61afa43209afbdad903addd82 as hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d19435d61afa43209afbdad903addd82 2024-11-24T08:49:48,531 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d19435d61afa43209afbdad903addd82, entries=1, sequenceid=56, filesize=4.9 K 2024-11-24T08:49:48,532 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false 2024-11-24T08:49:48,533 INFO [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:49:48,533 DEBUG [M:0;469387a2cdb6:46577 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438188400Disabling compacts and flushes for region at 1732438188400Disabling writes for close at 1732438188400Obtaining lock to block concurrent updates at 1732438188401 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732438188401Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732438188401Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732438188408 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732438188409 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732438188423 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732438188423Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732438188434 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732438188446 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732438188447 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732438188456 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732438188469 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732438188469Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732438188481 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732438188495 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732438188495Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c25b1dc: reopening flushed file at 1732438188505 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7430ae38: reopening flushed file at 1732438188511 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6caab6db: reopening flushed file at 1732438188517 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45c95bdc: reopening flushed file at 1732438188524 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false at 1732438188532 (+8 ms)Writing region close event to WAL at 1732438188533 (+1 ms)Closed at 1732438188533 2024-11-24T08:49:48,534 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,534 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,534 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,534 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,534 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:49:48,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42351 is added to blk_1073741847_1031 (size=757) 2024-11-24T08:49:48,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39937 is added to blk_1073741847_1031 (size=757) 2024-11-24T08:49:49,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:49,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:49,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:49,512 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,015 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:49:50,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:50,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:50,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T08:49:51,308 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-11-24T08:49:51,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:51,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:52,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:52,409 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 after 4001ms 2024-11-24T08:49:52,410 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/WALs/469387a2cdb6,46577,1732438158530/469387a2cdb6%2C46577%2C1732438158530.1732438158660 to hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/oldWALs/469387a2cdb6%2C46577%2C1732438158530.1732438158660 2024-11-24T08:49:52,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
    11 more
2024-11-24T08:49:52,415 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/MasterData/oldWALs/469387a2cdb6%2C46577%2C1732438158530.1732438158660 to hdfs://localhost:38481/user/jenkins/test-data/563e417d-9409-c14c-5bca-08cd06496d55/oldWALs/469387a2cdb6%2C46577%2C1732438158530.1732438158660$masterlocalwal$
2024-11-24T08:49:52,415 INFO [M:0;469387a2cdb6:46577 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-24T08:49:52,415 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-24T08:49:52,415 INFO [M:0;469387a2cdb6:46577 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46577
2024-11-24T08:49:52,415 INFO [M:0;469387a2cdb6:46577 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-24T08:49:52,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-24T08:49:52,517 INFO [M:0;469387a2cdb6:46577 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-24T08:49:52,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46577-0x10070ebc91e0000, quorum=127.0.0.1:52517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-24T08:49:52,520 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4856962{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T08:49:52,520 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17ea7577{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T08:49:52,521 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T08:49:52,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4da55ed8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T08:49:52,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18e7d289{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,STOPPED}
2024-11-24T08:49:52,522 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T08:49:52,522 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:52,522 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-590959869-172.17.0.2-1732438157948 (Datanode Uuid 4ab5b28e-fd22-45e1-bb99-28e8f51d553b) service to localhost/127.0.0.1:38481 2024-11-24T08:49:52,522 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:52,523 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data3/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:52,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data4/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:52,524 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:52,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b5d2e48{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:52,527 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76ee45be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:52,527 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:52,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1543fd34{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:52,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79c2c9b7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:52,528 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:49:52,529 WARN [BP-590959869-172.17.0.2-1732438157948 heartbeating to localhost/127.0.0.1:38481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-590959869-172.17.0.2-1732438157948 (Datanode Uuid 7a87b268-3c7d-4052-ae7d-66aa3b889c69) service to localhost/127.0.0.1:38481 2024-11-24T08:49:52,529 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:49:52,529 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:49:52,529 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data1/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:52,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/cluster_415cfaf0-356b-7498-76a9-57da45216436/data/data2/current/BP-590959869-172.17.0.2-1732438157948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:49:52,530 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:49:52,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@295c5a84{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:49:52,537 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42450b2b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:49:52,537 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:49:52,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@500b485f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:49:52,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4284e6e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir/,STOPPED} 2024-11-24T08:49:52,544 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:49:52,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:49:52,567 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 152) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38481 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:38481 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38481 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38481 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:38481 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:38481 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38481 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:38481 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=93 (was 107), ProcessCount=11 (was 11), AvailableMemoryMB=1432 (was 1689) 2024-11-24T08:49:52,573 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=93, ProcessCount=11, AvailableMemoryMB=1432 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.log.dir so I do NOT create it in target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9e1c9714-e8fd-0451-86f6-ce2b067ff2eb/hadoop.tmp.dir so I do NOT create it in target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f, deleteOnExit=true 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/test.cache.data in system properties and HBase conf 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:49:52,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:49:52,574 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:49:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:49:52,589 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:49:52,635 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:52,639 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:52,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:52,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:52,641 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:49:52,641 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T08:49:52,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a430ed9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir/,AVAILABLE}
2024-11-24T08:49:52,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70e37295{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T08:49:52,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6298d5fb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/java.io.tmpdir/jetty-localhost-33263-hadoop-hdfs-3_4_1-tests_jar-_-any-9046047043056784678/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-24T08:49:52,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26faf95f{HTTP/1.1, (http/1.1)}{localhost:33263}
2024-11-24T08:49:52,735 INFO [Time-limited test {}] server.Server(415): Started @183352ms
2024-11-24T08:49:52,746 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-24T08:49:52,793 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:52,796 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:52,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:52,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:52,797 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:49:52,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f882a6c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:52,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bbcea3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:52,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3230f8f4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/java.io.tmpdir/jetty-localhost-40487-hadoop-hdfs-3_4_1-tests_jar-_-any-15167761578505690854/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:52,893 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61363b5b{HTTP/1.1, (http/1.1)}{localhost:40487} 2024-11-24T08:49:52,893 INFO [Time-limited test {}] server.Server(415): Started @183510ms 2024-11-24T08:49:52,894 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:49:52,919 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:49:52,921 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:49:52,922 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:49:52,922 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:49:52,922 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:49:52,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37c41708{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:49:52,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d8b64e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:49:52,949 WARN [Thread-1633 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data2/current/BP-459801175-172.17.0.2-1732438192599/current, will proceed with Du for space computation calculation, 2024-11-24T08:49:52,949 WARN [Thread-1632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data1/current/BP-459801175-172.17.0.2-1732438192599/current, will proceed with Du for space computation calculation, 2024-11-24T08:49:52,968 WARN [Thread-1611 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:49:52,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47afcfdcadd22deb with lease ID 0x80a62d9ae595a11e: Processing first storage report for DS-54d2eaf1-3b00-4bc6-9934-6d67833f58c4 from datanode DatanodeRegistration(127.0.0.1:43887, datanodeUuid=c3136198-f274-49e3-9c41-40b9d8a33bdc, infoPort=36863, infoSecurePort=0, ipcPort=41841, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599) 2024-11-24T08:49:52,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47afcfdcadd22deb with lease ID 0x80a62d9ae595a11e: from storage DS-54d2eaf1-3b00-4bc6-9934-6d67833f58c4 node DatanodeRegistration(127.0.0.1:43887, datanodeUuid=c3136198-f274-49e3-9c41-40b9d8a33bdc, infoPort=36863, infoSecurePort=0, ipcPort=41841, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:52,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47afcfdcadd22deb with lease ID 0x80a62d9ae595a11e: Processing first storage report for DS-28ed8925-70f0-4811-8e26-7fde67ddc552 from datanode DatanodeRegistration(127.0.0.1:43887, datanodeUuid=c3136198-f274-49e3-9c41-40b9d8a33bdc, infoPort=36863, infoSecurePort=0, ipcPort=41841, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599) 2024-11-24T08:49:52,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47afcfdcadd22deb with lease ID 0x80a62d9ae595a11e: from storage DS-28ed8925-70f0-4811-8e26-7fde67ddc552 node DatanodeRegistration(127.0.0.1:43887, datanodeUuid=c3136198-f274-49e3-9c41-40b9d8a33bdc, infoPort=36863, infoSecurePort=0, ipcPort=41841, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:53,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fad1127{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/java.io.tmpdir/jetty-localhost-45491-hadoop-hdfs-3_4_1-tests_jar-_-any-12299185625747593017/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:49:53,023 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fa77a2d{HTTP/1.1, (http/1.1)}{localhost:45491} 2024-11-24T08:49:53,023 INFO [Time-limited test {}] server.Server(415): Started @183640ms 2024-11-24T08:49:53,024 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:49:53,078 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data4/current/BP-459801175-172.17.0.2-1732438192599/current, will proceed with Du for space computation calculation, 2024-11-24T08:49:53,078 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data3/current/BP-459801175-172.17.0.2-1732438192599/current, will proceed with Du for space computation calculation, 2024-11-24T08:49:53,099 WARN [Thread-1647 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:49:53,101 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38fbae6838245e47 with lease ID 0x80a62d9ae595a11f: Processing first storage report for DS-890f3076-d61e-4c5b-acee-80b93841ed7b from datanode DatanodeRegistration(127.0.0.1:46073, datanodeUuid=4435cdb2-40e7-400c-8bb2-dd489c39f08a, infoPort=40269, infoSecurePort=0, ipcPort=41145, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599) 2024-11-24T08:49:53,101 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38fbae6838245e47 with lease ID 0x80a62d9ae595a11f: from storage DS-890f3076-d61e-4c5b-acee-80b93841ed7b node DatanodeRegistration(127.0.0.1:46073, datanodeUuid=4435cdb2-40e7-400c-8bb2-dd489c39f08a, infoPort=40269, infoSecurePort=0, ipcPort=41145, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:53,101 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38fbae6838245e47 with lease ID 0x80a62d9ae595a11f: Processing first storage report for DS-13d1b181-6285-4513-bedc-568e74dbcc7f from datanode DatanodeRegistration(127.0.0.1:46073, datanodeUuid=4435cdb2-40e7-400c-8bb2-dd489c39f08a, infoPort=40269, infoSecurePort=0, ipcPort=41145, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599) 2024-11-24T08:49:53,101 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38fbae6838245e47 with lease ID 0x80a62d9ae595a11f: from storage DS-13d1b181-6285-4513-bedc-568e74dbcc7f node DatanodeRegistration(127.0.0.1:46073, datanodeUuid=4435cdb2-40e7-400c-8bb2-dd489c39f08a, infoPort=40269, infoSecurePort=0, ipcPort=41145, storageInfo=lv=-57;cid=testClusterID;nsid=1037138334;c=1732438192599), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:49:53,145 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6 2024-11-24T08:49:53,148 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/zookeeper_0, clientPort=53092, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:49:53,149 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53092 2024-11-24T08:49:53,150 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:53,151 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:49:53,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:49:53,161 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07 with version=8 2024-11-24T08:49:53,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase-staging 2024-11-24T08:49:53,164 INFO [Time-limited test {}] client.ConnectionUtils(128): master/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:49:53,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:53,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:53,164 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:49:53,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:53,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:49:53,164 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:49:53,164 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:49:53,165 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41949 2024-11-24T08:49:53,167 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41949 connecting to ZooKeeper ensemble=127.0.0.1:53092 2024-11-24T08:49:53,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419490x0, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:49:53,171 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41949-0x10070ec506a0000 connected 2024-11-24T08:49:53,185 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:53,187 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:53,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:53,189 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07, hbase.cluster.distributed=false 2024-11-24T08:49:53,191 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:49:53,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41949 2024-11-24T08:49:53,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41949 2024-11-24T08:49:53,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41949 2024-11-24T08:49:53,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41949 2024-11-24T08:49:53,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41949 2024-11-24T08:49:53,208 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:49:53,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:53,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:53,209 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:49:53,209 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:49:53,209 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:49:53,209 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:49:53,209 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:49:53,209 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45403 2024-11-24T08:49:53,211 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45403 connecting to ZooKeeper ensemble=127.0.0.1:53092 2024-11-24T08:49:53,211 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:53,213 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:53,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454030x0, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:49:53,216 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45403-0x10070ec506a0001 connected 2024-11-24T08:49:53,216 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:49:53,216 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:49:53,217 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:49:53,217 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:49:53,218 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:49:53,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45403 2024-11-24T08:49:53,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45403 2024-11-24T08:49:53,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45403 2024-11-24T08:49:53,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45403 2024-11-24T08:49:53,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45403 
2024-11-24T08:49:53,231 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;469387a2cdb6:41949 2024-11-24T08:49:53,232 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/469387a2cdb6,41949,1732438193163 2024-11-24T08:49:53,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:53,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:53,233 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/469387a2cdb6,41949,1732438193163 2024-11-24T08:49:53,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:49:53,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,234 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:49:53,235 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/469387a2cdb6,41949,1732438193163 from backup master directory 2024-11-24T08:49:53,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/469387a2cdb6,41949,1732438193163 2024-11-24T08:49:53,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:53,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:49:53,235 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:49:53,235 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=469387a2cdb6,41949,1732438193163
2024-11-24T08:49:53,239 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/hbase.id] with ID: 564989dd-ccf6-4dde-a2a4-aecbfea7be75
2024-11-24T08:49:53,240 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/.tmp/hbase.id
2024-11-24T08:49:53,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741826_1002 (size=42)
2024-11-24T08:49:53,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741826_1002 (size=42)
2024-11-24T08:49:53,246 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/.tmp/hbase.id]:[hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/hbase.id]
2024-11-24T08:49:53,257 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T08:49:53,257 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-24T08:49:53,258 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
2024-11-24T08:49:53,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:49:53,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:49:53,266 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:49:53,267 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:49:53,267 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:49:53,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:49:53,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:49:53,275 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store 2024-11-24T08:49:53,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:49:53,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:49:53,282 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:53,282 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:49:53,282 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:53,282 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:53,282 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:49:53,282 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:49:53,282 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:49:53,282 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438193282Disabling compacts and flushes for region at 1732438193282Disabling writes for close at 1732438193282Writing region close event to WAL at 1732438193282Closed at 1732438193282 2024-11-24T08:49:53,283 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/.initializing 2024-11-24T08:49:53,283 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/WALs/469387a2cdb6,41949,1732438193163 2024-11-24T08:49:53,285 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C41949%2C1732438193163, suffix=, logDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/WALs/469387a2cdb6,41949,1732438193163, archiveDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/oldWALs, maxLogs=10 2024-11-24T08:49:53,285 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C41949%2C1732438193163.1732438193285 2024-11-24T08:49:53,289 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/WALs/469387a2cdb6,41949,1732438193163/469387a2cdb6%2C41949%2C1732438193163.1732438193285 2024-11-24T08:49:53,293 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:36863:36863)] 2024-11-24T08:49:53,297 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:49:53,297 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:53,297 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,297 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:49:53,300 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:49:53,302 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:53,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:49:53,303 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:53,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,305 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:49:53,305 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,305 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:53,305 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,306 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,306 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,307 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,307 DEBUG [master/469387a2cdb6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,308 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:49:53,309 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:49:53,311 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:49:53,311 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795285, jitterRate=0.011257931590080261}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:49:53,312 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732438193297Initializing all the Stores at 1732438193298 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438193298Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438193298Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438193298Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438193298Cleaning up temporary data from old regions at 1732438193307 (+9 ms)Region opened successfully at 1732438193312 (+5 ms) 2024-11-24T08:49:53,312 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:49:53,315 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ab1f95e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:49:53,315 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:49:53,315 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:49:53,315 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:49:53,316 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:49:53,316 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:49:53,316 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:49:53,316 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:49:53,318 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:49:53,319 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:49:53,320 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:49:53,320 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:49:53,320 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:49:53,321 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:49:53,321 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:49:53,322 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:49:53,323 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:49:53,324 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:49:53,324 DEBUG 
[master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:49:53,326 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:49:53,327 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:49:53,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:53,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:49:53,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,328 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=469387a2cdb6,41949,1732438193163, sessionid=0x10070ec506a0000, setting cluster-up flag (Was=false) 2024-11-24T08:49:53,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,332 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:49:53,333 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,41949,1732438193163 2024-11-24T08:49:53,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,338 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:49:53,339 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,41949,1732438193163 2024-11-24T08:49:53,340 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:49:53,341 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:53,341 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:49:53,341 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:49:53,341 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 469387a2cdb6,41949,1732438193163 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:49:53,342 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:53,343 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:53,343 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:53,343 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:49:53,343 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/469387a2cdb6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:49:53,343 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,343 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:49:53,343 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732438223344 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:49:53,344 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:53,344 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:49:53,344 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:49:53,345 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438193345,5,FailOnTimeoutGroup] 2024-11-24T08:49:53,345 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,345 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438193345,5,FailOnTimeoutGroup] 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
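Several of the zookeeper.ZKUtil DEBUG entries earlier in this startup sequence (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge, /hbase/snapshot-cleanup) record probes for znodes that simply have not been created yet, which is why each is tagged "not necessarily an error". A minimal sketch of the same probe against a plain ZooKeeper client, assuming only the quorum address taken from the log and nothing HBase-specific:

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZNodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log; session timeout and no-op watcher are arbitrary for the sketch.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53092", 30_000, event -> { });
    Stat stat = zk.exists("/hbase/balancer", false);
    if (stat == null) {
      // Same outcome the master logs: the znode is absent, so it falls back to its default for this flag.
      System.out.println("/hbase/balancer not created yet; defaults apply");
    }
    zk.close();
  }
}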
2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,345 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:49:53,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:49:53,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:49:53,352 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:49:53,353 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07 2024-11-24T08:49:53,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:49:53,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:49:53,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:53,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:49:53,362 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:49:53,362 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-24T08:49:53,364 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:49:53,364 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:49:53,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:49:53,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:49:53,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:49:53,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,369 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:49:53,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740 2024-11-24T08:49:53,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740 2024-11-24T08:49:53,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:49:53,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:49:53,372 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
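At this point PEWorker-1 has written the hbase:meta table descriptor (info, ns, rep_barrier and table families) and is opening region 1588230740 against it. As a hedged sketch only, once the mini-cluster is up a client could read that descriptor back through the standard hbase-client Admin API (connection and configuration details assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MetaDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml (or explicit properties) points at the test quorum 127.0.0.1:53092.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      // Should show the same info/ns/rep_barrier/table layout the bootstrap just wrote.
      System.out.println(meta);
    }
  }
}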
2024-11-24T08:49:53,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:49:53,376 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:49:53,376 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877327, jitterRate=0.1155795156955719}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:49:53,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732438193359Initializing all the Stores at 1732438193360 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438193361 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438193361Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438193361Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438193361Cleaning up temporary data from old regions at 1732438193372 (+11 ms)Region opened successfully at 1732438193377 (+5 ms) 2024-11-24T08:49:53,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:49:53,377 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:49:53,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:49:53,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:49:53,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:49:53,378 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:49:53,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438193377Disabling compacts and flushes for region at 1732438193377Disabling writes for close at 1732438193377Writing 
region close event to WAL at 1732438193378 (+1 ms)Closed at 1732438193378 2024-11-24T08:49:53,379 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:53,379 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:49:53,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:49:53,381 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:49:53,382 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:49:53,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:53,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:53,422 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(746): ClusterId : 564989dd-ccf6-4dde-a2a4-aecbfea7be75 2024-11-24T08:49:53,422 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:49:53,426 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:49:53,426 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:49:53,429 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:49:53,430 DEBUG [RS:0;469387a2cdb6:45403 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5244283e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:49:53,445 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;469387a2cdb6:45403 2024-11-24T08:49:53,445 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:49:53,445 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:49:53,445 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T08:49:53,445 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,41949,1732438193163 with port=45403, startcode=1732438193208 2024-11-24T08:49:53,446 DEBUG [RS:0;469387a2cdb6:45403 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:49:53,447 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42997, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:49:53,448 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41949 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,448 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41949 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,449 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07 2024-11-24T08:49:53,449 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45159 2024-11-24T08:49:53,449 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:49:53,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:49:53,451 DEBUG [RS:0;469387a2cdb6:45403 {}] zookeeper.ZKUtil(111): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on 
existing znode=/hbase/rs/469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,451 WARN [RS:0;469387a2cdb6:45403 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:49:53,451 INFO [RS:0;469387a2cdb6:45403 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:49:53,451 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,451 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,45403,1732438193208] 2024-11-24T08:49:53,454 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:49:53,456 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:49:53,456 INFO [RS:0;469387a2cdb6:45403 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:49:53,456 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,456 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:49:53,457 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:49:53,457 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
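The reportForDuty exchange above ends with the region server registered and its ephemeral znode present under /hbase/rs. As an assumption-heavy sketch that is not part of this test, one way to confirm the registration from a client is to read the live-server map out of the cluster metrics:

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LiveServersSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // After the registration logged above, 469387a2cdb6,45403,1732438193208 should be listed here.
      for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
        System.out.println("live regionserver: " + rs);
      }
    }
  }
}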
2024-11-24T08:49:53,457 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:49:53,458 DEBUG [RS:0;469387a2cdb6:45403 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:49:53,461 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,461 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,461 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,461 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
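The executor.ExecutorService entries above start one bounded pool per region-server operation type, each described only by its corePoolSize/maxPoolSize pair (1/1 for most, 2/2 for log replay, 3/3 for snapshot and flush operations). HBase's executor service is its own class; purely to illustrate what those two numbers mean, here is the equivalent behaviour expressed with the plain JDK ThreadPoolExecutor (the class and task names below are hypothetical):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
  public static void main(String[] args) {
    // corePoolSize=3, maxPoolSize=3, like RS_SNAPSHOT_OPERATIONS above:
    // at most three tasks run concurrently, the rest wait in the queue.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.allowCoreThreadTimeOut(true); // optional; the RemoteProcedureDispatcher entry earlier enables the analogous flag
    for (int i = 0; i < 10; i++) {
      int task = i;
      pool.execute(() -> System.out.println("task " + task + " on " + Thread.currentThread().getName()));
    }
    pool.shutdown();
  }
}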
2024-11-24T08:49:53,461 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,461 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,45403,1732438193208-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:49:53,475 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:49:53,475 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,45403,1732438193208-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,475 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,475 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.Replication(171): 469387a2cdb6,45403,1732438193208 started 2024-11-24T08:49:53,488 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:53,488 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,45403,1732438193208, RpcServer on 469387a2cdb6/172.17.0.2:45403, sessionid=0x10070ec506a0001 2024-11-24T08:49:53,488 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:49:53,488 DEBUG [RS:0;469387a2cdb6:45403 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,488 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,45403,1732438193208' 2024-11-24T08:49:53,488 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:49:53,489 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:49:53,489 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:49:53,489 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:49:53,489 DEBUG [RS:0;469387a2cdb6:45403 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,489 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,45403,1732438193208' 2024-11-24T08:49:53,489 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:49:53,489 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:49:53,490 DEBUG [RS:0;469387a2cdb6:45403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:49:53,490 INFO [RS:0;469387a2cdb6:45403 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:49:53,490 INFO [RS:0;469387a2cdb6:45403 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T08:49:53,532 WARN [469387a2cdb6:41949 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:49:53,593 INFO [RS:0;469387a2cdb6:45403 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C45403%2C1732438193208, suffix=, logDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208, archiveDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/oldWALs, maxLogs=32 2024-11-24T08:49:53,593 INFO [RS:0;469387a2cdb6:45403 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45403%2C1732438193208.1732438193593 2024-11-24T08:49:53,600 INFO [RS:0;469387a2cdb6:45403 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438193593 2024-11-24T08:49:53,602 DEBUG [RS:0;469387a2cdb6:45403 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36863:36863),(127.0.0.1/127.0.0.1:40269:40269)] 2024-11-24T08:49:53,783 DEBUG [469387a2cdb6:41949 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:49:53,784 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,788 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,45403,1732438193208, state=OPENING 2024-11-24T08:49:53,790 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:49:53,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:49:53,793 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:53,793 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:53,793 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:49:53,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,45403,1732438193208}] 2024-11-24T08:49:53,948 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:49:53,952 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34141, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:49:53,959 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:49:53,959 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:49:53,962 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C45403%2C1732438193208.meta, suffix=.meta, logDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208, archiveDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/oldWALs, maxLogs=32 2024-11-24T08:49:53,962 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45403%2C1732438193208.meta.1732438193962.meta 2024-11-24T08:49:53,967 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.meta.1732438193962.meta 2024-11-24T08:49:53,969 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:36863:36863)] 2024-11-24T08:49:53,969 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:49:53,970 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:49:53,970 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:49:53,970 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T08:49:53,970 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:49:53,970 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:53,970 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:49:53,970 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:49:53,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:49:53,972 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:49:53,972 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:49:53,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:49:53,974 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:49:53,975 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:49:53,975 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:49:53,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:49:53,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:49:53,976 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:53,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:49:53,977 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:49:53,978 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740 2024-11-24T08:49:53,979 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740 2024-11-24T08:49:53,981 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:49:53,981 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:49:53,981 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:49:53,983 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:49:53,984 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770204, jitterRate=-0.020635992288589478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:49:53,984 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:49:53,985 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732438193970Writing region info on filesystem at 1732438193970Initializing all the Stores at 1732438193971 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438193971Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438193971Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438193971Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438193971Cleaning up temporary data from old regions at 1732438193981 (+10 ms)Running coprocessor post-open hooks at 1732438193984 (+3 ms)Region opened successfully at 1732438193985 (+1 ms) 2024-11-24T08:49:53,986 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732438193947 2024-11-24T08:49:53,989 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:49:53,990 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:49:53,991 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,992 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,45403,1732438193208, state=OPEN 2024-11-24T08:49:53,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:49:53,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:49:53,995 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=469387a2cdb6,45403,1732438193208 2024-11-24T08:49:53,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:53,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:49:53,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:49:53,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,45403,1732438193208 in 201 msec 2024-11-24T08:49:54,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:49:54,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 619 msec 2024-11-24T08:49:54,002 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:49:54,002 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:49:54,003 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:49:54,003 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,45403,1732438193208, seqNum=-1] 2024-11-24T08:49:54,004 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:49:54,006 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50081, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:49:54,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 670 msec 2024-11-24T08:49:54,012 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732438194012, completionTime=-1 2024-11-24T08:49:54,012 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:49:54,013 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732438254015 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732438314015 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41949,1732438193163-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41949,1732438193163-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41949,1732438193163-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-469387a2cdb6:41949, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:49:54,015 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:54,016 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:49:54,017 DEBUG [master/469387a2cdb6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:49:54,019 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.783sec 2024-11-24T08:49:54,020 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:49:54,020 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:49:54,020 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:49:54,020 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:49:54,020 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:49:54,020 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41949,1732438193163-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:49:54,020 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41949,1732438193163-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:49:54,022 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c5d61ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:49:54,022 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 469387a2cdb6,41949,-1 for getting cluster id 2024-11-24T08:49:54,022 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:49:54,023 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:49:54,023 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:49:54,023 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,41949,1732438193163-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:49:54,024 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '564989dd-ccf6-4dde-a2a4-aecbfea7be75' 2024-11-24T08:49:54,024 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:49:54,025 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "564989dd-ccf6-4dde-a2a4-aecbfea7be75" 2024-11-24T08:49:54,025 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55f18070, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:49:54,025 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [469387a2cdb6,41949,-1] 2024-11-24T08:49:54,025 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:49:54,025 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:49:54,026 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:49:54,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c928d03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:49:54,028 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:49:54,029 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,45403,1732438193208, seqNum=-1] 2024-11-24T08:49:54,029 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:49:54,030 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33084, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:49:54,032 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=469387a2cdb6,41949,1732438193163 2024-11-24T08:49:54,032 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:49:54,035 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:49:54,036 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:49:54,037 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 469387a2cdb6,41949,1732438193163 2024-11-24T08:49:54,037 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45451498 2024-11-24T08:49:54,037 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:49:54,038 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:49:54,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:49:54,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T08:49:54,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:49:54,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:49:54,042 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:49:54,042 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:54,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-24T08:49:54,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:49:54,043 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:49:54,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741835_1011 (size=405) 2024-11-24T08:49:54,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741835_1011 (size=405) 2024-11-24T08:49:54,056 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
aa190d37be8bcdae7367acf112976847, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07 2024-11-24T08:49:54,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741836_1012 (size=88) 2024-11-24T08:49:54,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741836_1012 (size=88) 2024-11-24T08:49:54,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:54,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:54,465 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:54,465 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing aa190d37be8bcdae7367acf112976847, disabling compactions & flushes 2024-11-24T08:49:54,465 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:49:54,465 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
2024-11-24T08:49:54,465 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. after waiting 0 ms 2024-11-24T08:49:54,465 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:49:54,465 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:49:54,465 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for aa190d37be8bcdae7367acf112976847: Waiting for close lock at 1732438194465Disabling compacts and flushes for region at 1732438194465Disabling writes for close at 1732438194465Writing region close event to WAL at 1732438194465Closed at 1732438194465 2024-11-24T08:49:54,467 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:49:54,467 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732438194467"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438194467"}]},"ts":"1732438194467"} 2024-11-24T08:49:54,470 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T08:49:54,471 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:49:54,471 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438194471"}]},"ts":"1732438194471"} 2024-11-24T08:49:54,474 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-24T08:49:54,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa190d37be8bcdae7367acf112976847, ASSIGN}] 2024-11-24T08:49:54,475 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa190d37be8bcdae7367acf112976847, ASSIGN 2024-11-24T08:49:54,477 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa190d37be8bcdae7367acf112976847, ASSIGN; state=OFFLINE, location=469387a2cdb6,45403,1732438193208; forceNewPlan=false, retain=false 2024-11-24T08:49:54,627 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa190d37be8bcdae7367acf112976847, regionState=OPENING, regionLocation=469387a2cdb6,45403,1732438193208 2024-11-24T08:49:54,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa190d37be8bcdae7367acf112976847, ASSIGN because future has completed 2024-11-24T08:49:54,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa190d37be8bcdae7367acf112976847, server=469387a2cdb6,45403,1732438193208}] 2024-11-24T08:49:54,791 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
2024-11-24T08:49:54,791 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aa190d37be8bcdae7367acf112976847, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:49:54,792 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,792 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:49:54,792 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,792 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,794 INFO [StoreOpener-aa190d37be8bcdae7367acf112976847-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,797 INFO [StoreOpener-aa190d37be8bcdae7367acf112976847-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa190d37be8bcdae7367acf112976847 columnFamilyName info 2024-11-24T08:49:54,797 DEBUG [StoreOpener-aa190d37be8bcdae7367acf112976847-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:49:54,798 INFO [StoreOpener-aa190d37be8bcdae7367acf112976847-1 {}] regionserver.HStore(327): Store=aa190d37be8bcdae7367acf112976847/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:49:54,798 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,799 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,800 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,801 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,801 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,804 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,808 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:49:54,808 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aa190d37be8bcdae7367acf112976847; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797150, jitterRate=0.013628989458084106}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:49:54,808 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa190d37be8bcdae7367acf112976847 2024-11-24T08:49:54,809 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aa190d37be8bcdae7367acf112976847: Running coprocessor pre-open hook at 1732438194792Writing region info on filesystem at 1732438194792Initializing all the Stores at 1732438194794 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438194794Cleaning up temporary data from old regions at 1732438194801 (+7 ms)Running coprocessor post-open hooks at 1732438194808 (+7 ms)Region opened successfully at 1732438194809 (+1 ms) 2024-11-24T08:49:54,810 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847., pid=6, masterSystemTime=1732438194785 2024-11-24T08:49:54,813 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:49:54,813 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:49:54,814 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa190d37be8bcdae7367acf112976847, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,45403,1732438193208 2024-11-24T08:49:54,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa190d37be8bcdae7367acf112976847, server=469387a2cdb6,45403,1732438193208 because future has completed 2024-11-24T08:49:54,823 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:49:54,823 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aa190d37be8bcdae7367acf112976847, server=469387a2cdb6,45403,1732438193208 in 188 msec 2024-11-24T08:49:54,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:49:54,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa190d37be8bcdae7367acf112976847, ASSIGN in 349 msec 2024-11-24T08:49:54,829 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:49:54,829 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438194829"}]},"ts":"1732438194829"} 2024-11-24T08:49:54,832 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-24T08:49:54,833 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:49:54,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 794 msec 2024-11-24T08:49:55,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:49:55,318 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:49:55,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:49:55,320 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:49:55,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:49:55,321 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T08:49:55,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:55,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:56,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:56,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:57,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:57,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:58,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:58,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:59,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:49:59,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:49:59,474 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:49:59,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:49:59,509 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:49:59,509 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-24T08:50:00,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:00,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:01,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:01,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:02,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:02,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:03,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:03,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:04,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:50:04,139 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T08:50:04,139 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-24T08:50:04,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:50:04,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
2024-11-24T08:50:04,150 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847., hostname=469387a2cdb6,45403,1732438193208, seqNum=2] 2024-11-24T08:50:04,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:50:04,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:50:04,162 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T08:50:04,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T08:50:04,164 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T08:50:04,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T08:50:04,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45403 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-24T08:50:04,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
2024-11-24T08:50:04,332 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing aa190d37be8bcdae7367acf112976847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T08:50:04,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/ed96eaba5f48425a9726c4cfc973e838 is 1080, key is row0001/info:/1732438204151/Put/seqid=0 2024-11-24T08:50:04,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741837_1013 (size=6033) 2024-11-24T08:50:04,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741837_1013 (size=6033) 2024-11-24T08:50:04,354 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/ed96eaba5f48425a9726c4cfc973e838 2024-11-24T08:50:04,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/ed96eaba5f48425a9726c4cfc973e838 as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/ed96eaba5f48425a9726c4cfc973e838 2024-11-24T08:50:04,367 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/ed96eaba5f48425a9726c4cfc973e838, entries=1, sequenceid=5, filesize=5.9 K 2024-11-24T08:50:04,368 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa190d37be8bcdae7367acf112976847 in 36ms, sequenceid=5, compaction requested=false 2024-11-24T08:50:04,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for aa190d37be8bcdae7367acf112976847: 2024-11-24T08:50:04,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
2024-11-24T08:50:04,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-24T08:50:04,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-24T08:50:04,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T08:50:04,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 209 msec 2024-11-24T08:50:04,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 220 msec 2024-11-24T08:50:04,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:04,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:05,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:05,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:06,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:06,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:07,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:07,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:08,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:08,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:09,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:09,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:10,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:10,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:11,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:11,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:12,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:12,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:13,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:13,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T08:50:14,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-24T08:50:14,209 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-24T08:50:14,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:50:14,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:50:14,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-24T08:50:14,220 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-24T08:50:14,222 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-24T08:50:14,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-24T08:50:14,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45403 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-24T08:50:14,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.
2024-11-24T08:50:14,379 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing aa190d37be8bcdae7367acf112976847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-24T08:50:14,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/f989d86839414c30b617ed54acf3983f is 1080, key is row0002/info:/1732438214212/Put/seqid=0
2024-11-24T08:50:14,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741838_1014 (size=6033)
2024-11-24T08:50:14,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741838_1014 (size=6033)
2024-11-24T08:50:14,401 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/f989d86839414c30b617ed54acf3983f
2024-11-24T08:50:14,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/f989d86839414c30b617ed54acf3983f as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/f989d86839414c30b617ed54acf3983f
2024-11-24T08:50:14,413 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/f989d86839414c30b617ed54acf3983f, entries=1, sequenceid=9, filesize=5.9 K
2024-11-24T08:50:14,414 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa190d37be8bcdae7367acf112976847 in 35ms, sequenceid=9, compaction requested=false
2024-11-24T08:50:14,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for aa190d37be8bcdae7367acf112976847:
2024-11-24T08:50:14,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.
2024-11-24T08:50:14,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-24T08:50:14,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-24T08:50:14,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-24T08:50:14,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 194 msec
2024-11-24T08:50:14,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 202 msec
2024-11-24T08:50:14,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-24T08:50:14,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:15,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:15,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:16,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:16,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:17,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:17,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:18,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:18,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:19,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:19,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:20,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T08:50:20,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 after 68073ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T08:50:20,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T08:50:20,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta after 68062ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T08:50:21,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:21,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:22,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:22,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:23,145 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:50:23,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:23,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T08:50:24,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-24T08:50:24,308 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-24T08:50:24,311 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45403%2C1732438193208.1732438224310
2024-11-24T08:50:24,317 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:50:24,317 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:50:24,317 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:50:24,317 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:50:24,318 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:50:24,318 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438193593 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438224310
2024-11-24T08:50:24,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741833_1009 (size=5546)
2024-11-24T08:50:24,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741833_1009 (size=5546)
2024-11-24T08:50:24,329 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36863:36863),(127.0.0.1/127.0.0.1:40269:40269)]
2024-11-24T08:50:24,330 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:50:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:50:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-24T08:50:24,333 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-24T08:50:24,334 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-24T08:50:24,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-24T08:50:24,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T08:50:24,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:24,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45403 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-24T08:50:24,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
2024-11-24T08:50:24,488 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing aa190d37be8bcdae7367acf112976847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-24T08:50:24,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/e59d1c43ff2e45e3a256b78aafedf073 is 1080, key is row0003/info:/1732438224309/Put/seqid=0
2024-11-24T08:50:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741840_1016 (size=6033)
2024-11-24T08:50:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741840_1016 (size=6033)
2024-11-24T08:50:24,505 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/e59d1c43ff2e45e3a256b78aafedf073
2024-11-24T08:50:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/e59d1c43ff2e45e3a256b78aafedf073 as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/e59d1c43ff2e45e3a256b78aafedf073
2024-11-24T08:50:24,521 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/e59d1c43ff2e45e3a256b78aafedf073, entries=1, sequenceid=13, filesize=5.9 K
2024-11-24T08:50:24,523 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa190d37be8bcdae7367acf112976847 in 34ms, sequenceid=13, compaction requested=true
2024-11-24T08:50:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for aa190d37be8bcdae7367acf112976847:
2024-11-24T08:50:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.
2024-11-24T08:50:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-24T08:50:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-24T08:50:24,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-24T08:50:24,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 191 msec
2024-11-24T08:50:24,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 206 msec
2024-11-24T08:50:25,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-24T08:50:25,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:26,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:26,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:27,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:27,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:28,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:28,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:29,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:29,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:30,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:30,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:31,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:31,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:32,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:32,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:33,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:33,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:34,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T08:50:34,358 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T08:50:34,358 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:50:34,359 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:50:34,359 DEBUG [Time-limited test {}] regionserver.HStore(1541): aa190d37be8bcdae7367acf112976847/info is initiating minor compaction (all files) 2024-11-24T08:50:34,359 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:50:34,359 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:34,359 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of aa190d37be8bcdae7367acf112976847/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
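The repeated Close-WAL-Writer-0 warnings above all share one shape: RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed through reflection (visible from the Method.invoke frames), and because the underlying DFSClient has apparently already been closed, the IOException("Filesystem closed") it throws comes back wrapped in java.lang.reflect.InvocationTargetException. The following is a minimal, self-contained Java sketch of that wrapping behavior only; ClosedFs is a hypothetical toy stand-in, not the actual HBase or HDFS code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveInvocationSketch {

  // Toy stand-in for a filesystem whose client is already closed (assumption for illustration).
  static class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
    try {
      // Reflective invocation, analogous to how RecoverLeaseFSUtils.isFileClosed calls the real method.
      m.invoke(new ClosedFs(), "/some/wal/file");
    } catch (InvocationTargetException e) {
      // The WARN entries log this wrapper; the "Caused by:" line is e.getCause().
      System.out.println("wrapper: " + e);
      System.out.println("cause:   " + e.getCause());
    }
  }
}

Running the sketch prints the InvocationTargetException wrapper first and its cause second, which matches the "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" pattern in the WARN entries above.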
2024-11-24T08:50:34,360 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/ed96eaba5f48425a9726c4cfc973e838, hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/f989d86839414c30b617ed54acf3983f, hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/e59d1c43ff2e45e3a256b78aafedf073] into tmpdir=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp, totalSize=17.7 K 2024-11-24T08:50:34,360 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ed96eaba5f48425a9726c4cfc973e838, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732438204151 2024-11-24T08:50:34,360 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f989d86839414c30b617ed54acf3983f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732438214212 2024-11-24T08:50:34,361 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e59d1c43ff2e45e3a256b78aafedf073, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732438224309 2024-11-24T08:50:34,362 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T08:50:34,362 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T08:50:34,373 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): aa190d37be8bcdae7367acf112976847#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:50:34,374 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/8e586a8f8cf64d6f8fb392d83e102bb4 is 1080, key is row0001/info:/1732438204151/Put/seqid=0 2024-11-24T08:50:34,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741841_1017 (size=8296) 2024-11-24T08:50:34,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741841_1017 (size=8296) 2024-11-24T08:50:34,387 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/8e586a8f8cf64d6f8fb392d83e102bb4 as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/8e586a8f8cf64d6f8fb392d83e102bb4 2024-11-24T08:50:34,394 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa190d37be8bcdae7367acf112976847/info of aa190d37be8bcdae7367acf112976847 into 8e586a8f8cf64d6f8fb392d83e102bb4(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:50:34,394 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for aa190d37be8bcdae7367acf112976847: 2024-11-24T08:50:34,396 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45403%2C1732438193208.1732438234396 2024-11-24T08:50:34,402 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:34,402 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:34,402 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:34,402 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:34,402 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:34,403 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438224310 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438234396 2024-11-24T08:50:34,403 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:36863:36863)] 2024-11-24T08:50:34,404 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438224310 is not closed yet, will try archiving it next time 2024-11-24T08:50:34,404 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438193593 to hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/oldWALs/469387a2cdb6%2C45403%2C1732438193208.1732438193593 2024-11-24T08:50:34,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741839_1015 (size=2520) 2024-11-24T08:50:34,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:50:34,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741839_1015 (size=2520) 2024-11-24T08:50:34,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:50:34,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T08:50:34,407 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T08:50:34,408 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T08:50:34,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T08:50:34,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:34,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:34,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45403 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-24T08:50:34,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:50:34,563 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing aa190d37be8bcdae7367acf112976847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T08:50:34,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/7c956dce97b248f8b0d0f3de2be2b18a is 1080, key is row0000/info:/1732438234395/Put/seqid=0 2024-11-24T08:50:34,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741843_1019 (size=6033) 2024-11-24T08:50:34,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741843_1019 (size=6033) 2024-11-24T08:50:34,574 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/7c956dce97b248f8b0d0f3de2be2b18a 2024-11-24T08:50:34,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/7c956dce97b248f8b0d0f3de2be2b18a as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/7c956dce97b248f8b0d0f3de2be2b18a 2024-11-24T08:50:34,586 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/7c956dce97b248f8b0d0f3de2be2b18a, entries=1, sequenceid=18, filesize=5.9 K 2024-11-24T08:50:34,587 INFO [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa190d37be8bcdae7367acf112976847 in 24ms, sequenceid=18, compaction requested=false 2024-11-24T08:50:34,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): 
Flush status journal for aa190d37be8bcdae7367acf112976847: 2024-11-24T08:50:34,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:50:34,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-24T08:50:34,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-24T08:50:34,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-24T08:50:34,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-24T08:50:34,594 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec 2024-11-24T08:50:35,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:35,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:36,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:36,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:37,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:37,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:38,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:38,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:39,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:39,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:39,792 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aa190d37be8bcdae7367acf112976847, had cached 0 bytes from a total of 14329 2024-11-24T08:50:40,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:40,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:41,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:41,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:42,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:42,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:43,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:43,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:44,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41949 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T08:50:44,438 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T08:50:44,442 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C45403%2C1732438193208.1732438244442 2024-11-24T08:50:44,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:44,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:44,454 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:44,454 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:44,454 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:44,454 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:44,454 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438234396 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438244442 2024-11-24T08:50:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741842_1018 (size=2026) 2024-11-24T08:50:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741842_1018 (size=2026) 2024-11-24T08:50:44,457 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/WALs/469387a2cdb6,45403,1732438193208/469387a2cdb6%2C45403%2C1732438193208.1732438224310 to hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/oldWALs/469387a2cdb6%2C45403%2C1732438193208.1732438224310 2024-11-24T08:50:44,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:44,457 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:36863:36863)] 2024-11-24T08:50:44,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:50:44,457 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:50:44,457 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:50:44,458 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:50:44,458 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:50:44,458 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T08:50:44,460 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:50:44,460 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1758487653, stopped=false 2024-11-24T08:50:44,460 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=469387a2cdb6,41949,1732438193163 2024-11-24T08:50:44,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:50:44,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:50:44,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:44,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:44,462 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:50:44,462 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:50:44,462 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:50:44,462 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:50:44,462 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:50:44,462 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:50:44,462 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,45403,1732438193208' ***** 2024-11-24T08:50:44,462 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:50:44,462 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:50:44,462 INFO [RS:0;469387a2cdb6:45403 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(3091): Received CLOSE for aa190d37be8bcdae7367acf112976847 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,45403,1732438193208 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;469387a2cdb6:45403. 2024-11-24T08:50:44,463 DEBUG [RS:0;469387a2cdb6:45403 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:50:44,463 DEBUG [RS:0;469387a2cdb6:45403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T08:50:44,463 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:50:44,464 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T08:50:44,464 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1325): Online Regions={aa190d37be8bcdae7367acf112976847=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:50:44,464 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, aa190d37be8bcdae7367acf112976847 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing aa190d37be8bcdae7367acf112976847, disabling compactions & flushes 2024-11-24T08:50:44,464 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:50:44,464 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. after waiting 0 ms 2024-11-24T08:50:44,464 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 
2024-11-24T08:50:44,464 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-24T08:50:44,464 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing aa190d37be8bcdae7367acf112976847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T08:50:44,466 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:50:44,469 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/c236e35496214ec9a2730f2bc4efe265 is 1080, key is row0001/info:/1732438244440/Put/seqid=0 2024-11-24T08:50:44,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741845_1021 (size=6033) 2024-11-24T08:50:44,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741845_1021 (size=6033) 2024-11-24T08:50:44,483 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/info/ae805796fa6d40a7935de3ba51e1c52c is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847./info:regioninfo/1732438194814/Put/seqid=0 2024-11-24T08:50:44,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741846_1022 (size=7308) 2024-11-24T08:50:44,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741846_1022 (size=7308) 2024-11-24T08:50:44,511 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/info/ae805796fa6d40a7935de3ba51e1c52c 2024-11-24T08:50:44,533 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/ns/7aa8c83bdf8b4fe197a3fd587072b02c is 43, key is default/ns:d/1732438194006/Put/seqid=0 2024-11-24T08:50:44,536 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:50:44,536 INFO [regionserver/469387a2cdb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:50:44,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741847_1023 (size=5153) 2024-11-24T08:50:44,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741847_1023 (size=5153) 
2024-11-24T08:50:44,540 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/ns/7aa8c83bdf8b4fe197a3fd587072b02c 2024-11-24T08:50:44,582 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/table/4dc0a417fdd942d09df325495a413f95 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732438194829/Put/seqid=0 2024-11-24T08:50:44,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741848_1024 (size=5508) 2024-11-24T08:50:44,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741848_1024 (size=5508) 2024-11-24T08:50:44,592 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/table/4dc0a417fdd942d09df325495a413f95 2024-11-24T08:50:44,599 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/info/ae805796fa6d40a7935de3ba51e1c52c as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/info/ae805796fa6d40a7935de3ba51e1c52c 2024-11-24T08:50:44,605 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/info/ae805796fa6d40a7935de3ba51e1c52c, entries=10, sequenceid=11, filesize=7.1 K 2024-11-24T08:50:44,606 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/ns/7aa8c83bdf8b4fe197a3fd587072b02c as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/ns/7aa8c83bdf8b4fe197a3fd587072b02c 2024-11-24T08:50:44,611 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/ns/7aa8c83bdf8b4fe197a3fd587072b02c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T08:50:44,612 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/.tmp/table/4dc0a417fdd942d09df325495a413f95 as 
hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/table/4dc0a417fdd942d09df325495a413f95 2024-11-24T08:50:44,617 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/table/4dc0a417fdd942d09df325495a413f95, entries=2, sequenceid=11, filesize=5.4 K 2024-11-24T08:50:44,618 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false 2024-11-24T08:50:44,623 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T08:50:44,624 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:50:44,624 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:50:44,624 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438244464Running coprocessor pre-close hooks at 1732438244464Disabling compacts and flushes for region at 1732438244464Disabling writes for close at 1732438244464Obtaining lock to block concurrent updates at 1732438244464Preparing flush snapshotting stores in 1588230740 at 1732438244464Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732438244464Flushing stores of hbase:meta,,1.1588230740 at 1732438244465 (+1 ms)Flushing 1588230740/info: creating writer at 1732438244465Flushing 1588230740/info: appending metadata at 1732438244483 (+18 ms)Flushing 1588230740/info: closing flushed file at 1732438244483Flushing 1588230740/ns: creating writer at 1732438244518 (+35 ms)Flushing 1588230740/ns: appending metadata at 1732438244532 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732438244532Flushing 1588230740/table: creating writer at 1732438244555 (+23 ms)Flushing 1588230740/table: appending metadata at 1732438244581 (+26 ms)Flushing 1588230740/table: closing flushed file at 1732438244581Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33ce86b: reopening flushed file at 1732438244598 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d7499be: reopening flushed file at 1732438244605 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bde8b24: reopening flushed file at 1732438244611 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false at 1732438244618 (+7 ms)Writing region close event to WAL at 1732438244619 (+1 ms)Running coprocessor post-close hooks at 1732438244624 (+5 ms)Closed at 1732438244624 2024-11-24T08:50:44,624 DEBUG 
[RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:50:44,664 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1351): Waiting on aa190d37be8bcdae7367acf112976847 2024-11-24T08:50:44,864 DEBUG [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1351): Waiting on aa190d37be8bcdae7367acf112976847 2024-11-24T08:50:44,877 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/c236e35496214ec9a2730f2bc4efe265 2024-11-24T08:50:44,883 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/.tmp/info/c236e35496214ec9a2730f2bc4efe265 as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/c236e35496214ec9a2730f2bc4efe265 2024-11-24T08:50:44,889 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/c236e35496214ec9a2730f2bc4efe265, entries=1, sequenceid=22, filesize=5.9 K 2024-11-24T08:50:44,890 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa190d37be8bcdae7367acf112976847 in 426ms, sequenceid=22, compaction requested=true 2024-11-24T08:50:44,891 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/ed96eaba5f48425a9726c4cfc973e838, hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/f989d86839414c30b617ed54acf3983f, hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/e59d1c43ff2e45e3a256b78aafedf073] to archive 2024-11-24T08:50:44,892 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T08:50:44,894 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/ed96eaba5f48425a9726c4cfc973e838 to hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/ed96eaba5f48425a9726c4cfc973e838 2024-11-24T08:50:44,898 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/f989d86839414c30b617ed54acf3983f to hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/f989d86839414c30b617ed54acf3983f 2024-11-24T08:50:44,899 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/e59d1c43ff2e45e3a256b78aafedf073 to hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/info/e59d1c43ff2e45e3a256b78aafedf073 2024-11-24T08:50:44,900 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=469387a2cdb6:41949 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T08:50:44,900 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ed96eaba5f48425a9726c4cfc973e838=6033, f989d86839414c30b617ed54acf3983f=6033, e59d1c43ff2e45e3a256b78aafedf073=6033] 2024-11-24T08:50:44,904 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa190d37be8bcdae7367acf112976847/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-24T08:50:44,905 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:50:44,906 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for aa190d37be8bcdae7367acf112976847: Waiting for close lock at 1732438244464Running coprocessor pre-close hooks at 1732438244464Disabling compacts and flushes for region at 1732438244464Disabling writes for close at 1732438244464Obtaining lock to block concurrent updates at 1732438244464Preparing flush snapshotting stores in aa190d37be8bcdae7367acf112976847 at 1732438244464Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732438244464Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. at 1732438244465 (+1 ms)Flushing aa190d37be8bcdae7367acf112976847/info: creating writer at 1732438244465Flushing aa190d37be8bcdae7367acf112976847/info: appending metadata at 1732438244468 (+3 ms)Flushing aa190d37be8bcdae7367acf112976847/info: closing flushed file at 1732438244468Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68deb937: reopening flushed file at 1732438244882 (+414 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa190d37be8bcdae7367acf112976847 in 426ms, sequenceid=22, compaction requested=true at 1732438244891 (+9 ms)Writing region close event to WAL at 1732438244901 (+10 ms)Running coprocessor post-close hooks at 1732438244905 (+4 ms)Closed at 1732438244905 2024-11-24T08:50:44,906 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732438194038.aa190d37be8bcdae7367acf112976847. 2024-11-24T08:50:45,064 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,45403,1732438193208; all regions closed. 
2024-11-24T08:50:45,065 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,065 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,065 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,065 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,065 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741834_1010 (size=3306) 2024-11-24T08:50:45,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741834_1010 (size=3306) 2024-11-24T08:50:45,070 DEBUG [RS:0;469387a2cdb6:45403 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/oldWALs 2024-11-24T08:50:45,070 INFO [RS:0;469387a2cdb6:45403 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C45403%2C1732438193208.meta:.meta(num 1732438193962) 2024-11-24T08:50:45,071 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,071 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,071 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,071 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,071 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741844_1020 (size=1252) 2024-11-24T08:50:45,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741844_1020 (size=1252) 2024-11-24T08:50:45,076 DEBUG [RS:0;469387a2cdb6:45403 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/oldWALs 2024-11-24T08:50:45,076 INFO [RS:0;469387a2cdb6:45403 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C45403%2C1732438193208:(num 1732438244442) 2024-11-24T08:50:45,076 DEBUG [RS:0;469387a2cdb6:45403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:50:45,076 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:50:45,076 INFO [RS:0;469387a2cdb6:45403 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:50:45,077 INFO [RS:0;469387a2cdb6:45403 {}] hbase.ChoreService(370): Chore service for: regionserver/469387a2cdb6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:50:45,077 INFO [RS:0;469387a2cdb6:45403 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:50:45,077 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:50:45,077 INFO [RS:0;469387a2cdb6:45403 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45403 2024-11-24T08:50:45,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,45403,1732438193208 2024-11-24T08:50:45,078 INFO [RS:0;469387a2cdb6:45403 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:50:45,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:50:45,079 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$361/0x00007f98d4902e08@4b33d111 rejected from java.util.concurrent.ThreadPoolExecutor@e315613[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-24T08:50:45,079 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,45403,1732438193208] 2024-11-24T08:50:45,080 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,45403,1732438193208 already deleted, retry=false 2024-11-24T08:50:45,080 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,45403,1732438193208 expired; onlineServers=0 2024-11-24T08:50:45,080 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '469387a2cdb6,41949,1732438193163' ***** 2024-11-24T08:50:45,080 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:50:45,080 INFO [M:0;469387a2cdb6:41949 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:50:45,080 INFO [M:0;469387a2cdb6:41949 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:50:45,080 DEBUG [M:0;469387a2cdb6:41949 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:50:45,080 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:50:45,080 DEBUG [M:0;469387a2cdb6:41949 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:50:45,080 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438193345 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438193345,5,FailOnTimeoutGroup] 2024-11-24T08:50:45,080 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438193345 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438193345,5,FailOnTimeoutGroup] 2024-11-24T08:50:45,080 INFO [M:0;469387a2cdb6:41949 {}] hbase.ChoreService(370): Chore service for: master/469387a2cdb6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:50:45,081 INFO [M:0;469387a2cdb6:41949 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:50:45,081 DEBUG [M:0;469387a2cdb6:41949 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:50:45,081 INFO [M:0;469387a2cdb6:41949 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:50:45,081 INFO [M:0;469387a2cdb6:41949 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:50:45,081 INFO [M:0;469387a2cdb6:41949 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:50:45,081 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:50:45,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:50:45,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:45,082 DEBUG [M:0;469387a2cdb6:41949 {}] zookeeper.ZKUtil(347): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:50:45,082 WARN [M:0;469387a2cdb6:41949 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:50:45,082 INFO [M:0;469387a2cdb6:41949 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/.lastflushedseqids 2024-11-24T08:50:45,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741849_1025 (size=130) 2024-11-24T08:50:45,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741849_1025 (size=130) 2024-11-24T08:50:45,091 INFO [M:0;469387a2cdb6:41949 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:50:45,091 INFO [M:0;469387a2cdb6:41949 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:50:45,092 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:50:45,092 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:50:45,092 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:50:45,092 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:50:45,092 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:50:45,092 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-24T08:50:45,107 DEBUG [M:0;469387a2cdb6:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3da478edfe02495aa5dbdb9b9e029880 is 82, key is hbase:meta,,1/info:regioninfo/1732438193991/Put/seqid=0 2024-11-24T08:50:45,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741850_1026 (size=5672) 2024-11-24T08:50:45,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741850_1026 (size=5672) 2024-11-24T08:50:45,112 INFO [M:0;469387a2cdb6:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3da478edfe02495aa5dbdb9b9e029880 2024-11-24T08:50:45,134 DEBUG [M:0;469387a2cdb6:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76f800c876054931b0342724a4b1c46b is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732438194835/Put/seqid=0 2024-11-24T08:50:45,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741851_1027 (size=7825) 2024-11-24T08:50:45,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741851_1027 (size=7825) 2024-11-24T08:50:45,140 INFO [M:0;469387a2cdb6:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76f800c876054931b0342724a4b1c46b 2024-11-24T08:50:45,144 INFO [M:0;469387a2cdb6:41949 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 76f800c876054931b0342724a4b1c46b 2024-11-24T08:50:45,158 DEBUG [M:0;469387a2cdb6:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5e95593e855e43bfb6c55ce8d0fbb585 is 69, key is 469387a2cdb6,45403,1732438193208/rs:state/1732438193448/Put/seqid=0 2024-11-24T08:50:45,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741852_1028 (size=5156) 2024-11-24T08:50:45,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741852_1028 (size=5156) 2024-11-24T08:50:45,164 INFO [M:0;469387a2cdb6:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5e95593e855e43bfb6c55ce8d0fbb585 2024-11-24T08:50:45,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:50:45,179 INFO [RS:0;469387a2cdb6:45403 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:50:45,180 INFO [RS:0;469387a2cdb6:45403 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,45403,1732438193208; zookeeper connection closed. 2024-11-24T08:50:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45403-0x10070ec506a0001, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:50:45,180 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4687e01a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4687e01a 2024-11-24T08:50:45,180 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:50:45,189 DEBUG [M:0;469387a2cdb6:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5828c02afb8e47dcb3f51879038a09cc is 52, key is load_balancer_on/state:d/1732438194034/Put/seqid=0 2024-11-24T08:50:45,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741853_1029 (size=5056) 2024-11-24T08:50:45,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741853_1029 (size=5056) 2024-11-24T08:50:45,196 INFO [M:0;469387a2cdb6:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5828c02afb8e47dcb3f51879038a09cc 2024-11-24T08:50:45,202 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3da478edfe02495aa5dbdb9b9e029880 as 
hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3da478edfe02495aa5dbdb9b9e029880 2024-11-24T08:50:45,206 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3da478edfe02495aa5dbdb9b9e029880, entries=8, sequenceid=121, filesize=5.5 K 2024-11-24T08:50:45,207 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76f800c876054931b0342724a4b1c46b as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/76f800c876054931b0342724a4b1c46b 2024-11-24T08:50:45,212 INFO [M:0;469387a2cdb6:41949 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 76f800c876054931b0342724a4b1c46b 2024-11-24T08:50:45,212 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/76f800c876054931b0342724a4b1c46b, entries=14, sequenceid=121, filesize=7.6 K 2024-11-24T08:50:45,213 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5e95593e855e43bfb6c55ce8d0fbb585 as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5e95593e855e43bfb6c55ce8d0fbb585 2024-11-24T08:50:45,217 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5e95593e855e43bfb6c55ce8d0fbb585, entries=1, sequenceid=121, filesize=5.0 K 2024-11-24T08:50:45,218 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5828c02afb8e47dcb3f51879038a09cc as hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5828c02afb8e47dcb3f51879038a09cc 2024-11-24T08:50:45,223 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45159/user/jenkins/test-data/09638ba2-64f6-f80b-6f03-77db915dba07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5828c02afb8e47dcb3f51879038a09cc, entries=1, sequenceid=121, filesize=4.9 K 2024-11-24T08:50:45,224 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44656, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=121, compaction requested=false 2024-11-24T08:50:45,225 INFO [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:50:45,225 DEBUG [M:0;469387a2cdb6:41949 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438245092Disabling compacts and flushes for region at 1732438245092Disabling writes for close at 1732438245092Obtaining lock to block concurrent updates at 1732438245092Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732438245092Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44656, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1732438245092Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732438245093 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732438245093Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732438245106 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732438245106Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732438245117 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732438245134 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732438245134Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732438245144 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732438245157 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732438245157Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732438245169 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732438245188 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732438245188Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ae8db0d: reopening flushed file at 1732438245201 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62246e88: reopening flushed file at 1732438245207 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4461de91: reopening flushed file at 1732438245212 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fc65e61: reopening flushed file at 1732438245217 (+5 ms)Finished flush of dataSize ~43.61 KB/44656, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=121, compaction requested=false at 1732438245224 (+7 ms)Writing region close event to WAL at 1732438245225 (+1 ms)Closed at 1732438245225 2024-11-24T08:50:45,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:50:45,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43887 is added to blk_1073741830_1006 (size=53053) 2024-11-24T08:50:45,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46073 is added to blk_1073741830_1006 (size=53053) 2024-11-24T08:50:45,229 INFO [M:0;469387a2cdb6:41949 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T08:50:45,229 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:50:45,229 INFO [M:0;469387a2cdb6:41949 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41949 2024-11-24T08:50:45,229 INFO [M:0;469387a2cdb6:41949 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:50:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:50:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:50:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T08:50:45,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:50:45,331 INFO [M:0;469387a2cdb6:41949 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:50:45,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:50:45,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10070ec506a0000, quorum=127.0.0.1:53092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:50:45,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fad1127{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:50:45,333 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fa77a2d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:50:45,333 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:50:45,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d8b64e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:50:45,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37c41708{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir/,STOPPED} 2024-11-24T08:50:45,335 WARN [BP-459801175-172.17.0.2-1732438192599 heartbeating to localhost/127.0.0.1:45159 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:50:45,335 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:50:45,335 WARN [BP-459801175-172.17.0.2-1732438192599 heartbeating to localhost/127.0.0.1:45159 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-459801175-172.17.0.2-1732438192599 (Datanode Uuid 4435cdb2-40e7-400c-8bb2-dd489c39f08a) service to localhost/127.0.0.1:45159 2024-11-24T08:50:45,335 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:50:45,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data3/current/BP-459801175-172.17.0.2-1732438192599 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:50:45,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data4/current/BP-459801175-172.17.0.2-1732438192599 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:50:45,335 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:50:45,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3230f8f4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:50:45,337 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61363b5b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:50:45,337 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:50:45,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bbcea3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:50:45,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f882a6c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir/,STOPPED} 2024-11-24T08:50:45,339 WARN [BP-459801175-172.17.0.2-1732438192599 heartbeating to localhost/127.0.0.1:45159 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:50:45,339 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:50:45,339 WARN [BP-459801175-172.17.0.2-1732438192599 heartbeating to localhost/127.0.0.1:45159 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-459801175-172.17.0.2-1732438192599 (Datanode Uuid c3136198-f274-49e3-9c41-40b9d8a33bdc) service to localhost/127.0.0.1:45159 2024-11-24T08:50:45,339 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:50:45,339 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data1/current/BP-459801175-172.17.0.2-1732438192599 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:50:45,339 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/cluster_515c9a62-d591-cfff-8e97-71830b516b1f/data/data2/current/BP-459801175-172.17.0.2-1732438192599 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:50:45,340 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:50:45,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6298d5fb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:50:45,345 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26faf95f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:50:45,345 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:50:45,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70e37295{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:50:45,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a430ed9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir/,STOPPED} 2024-11-24T08:50:45,351 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:50:45,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:50:45,386 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45159 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45159 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45159 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:45159 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45159 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/469387a2cdb6:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45159 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45159 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45159 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=116 (was 93) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1502 (was 1432) - AvailableMemoryMB LEAK? 
- 2024-11-24T08:50:45,394 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=116, ProcessCount=11, AvailableMemoryMB=1501 2024-11-24T08:50:45,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.log.dir so I do NOT create it in target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ab19236c-57d0-a88f-e1df-c659cabe7be6/hadoop.tmp.dir so I do NOT create it in target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a, deleteOnExit=true 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/test.cache.data in system properties and HBase conf 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:50:45,395 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:50:45,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:50:45,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:50:45,408 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:50:45,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:45,455 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:50:45,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:45,459 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:50:45,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:50:45,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:50:45,460 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:50:45,461 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:50:45,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@547e6321{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:50:45,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79af711a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:50:45,464 INFO [regionserver/469387a2cdb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:50:45,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d9cf385{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/java.io.tmpdir/jetty-localhost-33945-hadoop-hdfs-3_4_1-tests_jar-_-any-2139920595927865034/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:50:45,555 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35058e3e{HTTP/1.1, (http/1.1)}{localhost:33945} 2024-11-24T08:50:45,555 INFO [Time-limited test {}] server.Server(415): Started @236172ms 2024-11-24T08:50:45,567 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:50:45,612 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:50:45,616 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:50:45,620 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:50:45,620 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:50:45,620 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:50:45,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53da1c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:50:45,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@384851d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:50:45,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5492219{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/java.io.tmpdir/jetty-localhost-39957-hadoop-hdfs-3_4_1-tests_jar-_-any-10788889183379788771/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:50:45,715 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@231b9bd6{HTTP/1.1, (http/1.1)}{localhost:39957} 2024-11-24T08:50:45,715 INFO [Time-limited test {}] server.Server(415): Started @236332ms 2024-11-24T08:50:45,716 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:50:45,743 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:50:45,747 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:50:45,748 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:50:45,748 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:50:45,748 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:50:45,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74b22f54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:50:45,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c149881{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:50:45,777 WARN [Thread-1950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data1/current/BP-619707123-172.17.0.2-1732438245419/current, will proceed with Du for space computation calculation, 2024-11-24T08:50:45,777 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data2/current/BP-619707123-172.17.0.2-1732438245419/current, will proceed with Du for space computation calculation, 2024-11-24T08:50:45,796 WARN [Thread-1929 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:50:45,798 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f1bebd31ee8a7e7 with lease ID 0x330c1d9960302fad: Processing first storage report for DS-b567a274-339d-44a5-ba09-47acf8664e18 from datanode DatanodeRegistration(127.0.0.1:37939, datanodeUuid=7ef7d38a-49f5-46b5-99ef-212199663221, infoPort=46531, infoSecurePort=0, ipcPort=44293, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419) 2024-11-24T08:50:45,798 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f1bebd31ee8a7e7 with lease ID 0x330c1d9960302fad: from storage DS-b567a274-339d-44a5-ba09-47acf8664e18 node DatanodeRegistration(127.0.0.1:37939, datanodeUuid=7ef7d38a-49f5-46b5-99ef-212199663221, infoPort=46531, infoSecurePort=0, ipcPort=44293, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:50:45,798 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f1bebd31ee8a7e7 with lease ID 0x330c1d9960302fad: Processing first storage report for DS-03f115a3-d5ee-4361-a82c-1bd47195068f from datanode DatanodeRegistration(127.0.0.1:37939, datanodeUuid=7ef7d38a-49f5-46b5-99ef-212199663221, infoPort=46531, infoSecurePort=0, ipcPort=44293, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419) 2024-11-24T08:50:45,798 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f1bebd31ee8a7e7 with lease ID 0x330c1d9960302fad: from storage DS-03f115a3-d5ee-4361-a82c-1bd47195068f node DatanodeRegistration(127.0.0.1:37939, datanodeUuid=7ef7d38a-49f5-46b5-99ef-212199663221, infoPort=46531, infoSecurePort=0, ipcPort=44293, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:50:45,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5baa5e87{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/java.io.tmpdir/jetty-localhost-40951-hadoop-hdfs-3_4_1-tests_jar-_-any-3118948652966162563/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:50:45,859 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@240b8237{HTTP/1.1, (http/1.1)}{localhost:40951} 2024-11-24T08:50:45,859 INFO [Time-limited test {}] server.Server(415): Started @236476ms 2024-11-24T08:50:45,860 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:50:45,915 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data3/current/BP-619707123-172.17.0.2-1732438245419/current, will proceed with Du for space computation calculation, 2024-11-24T08:50:45,915 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data4/current/BP-619707123-172.17.0.2-1732438245419/current, will proceed with Du for space computation calculation, 2024-11-24T08:50:45,932 WARN [Thread-1965 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:50:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd71c0ff1026b0f33 with lease ID 0x330c1d9960302fae: Processing first storage report for DS-b8a6b01d-fb42-4849-a532-8a0bd0be02f8 from datanode DatanodeRegistration(127.0.0.1:33149, datanodeUuid=f396ea77-d025-489a-b6b6-9445118fba37, infoPort=43467, infoSecurePort=0, ipcPort=39613, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419) 2024-11-24T08:50:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd71c0ff1026b0f33 with lease ID 0x330c1d9960302fae: from storage DS-b8a6b01d-fb42-4849-a532-8a0bd0be02f8 node DatanodeRegistration(127.0.0.1:33149, datanodeUuid=f396ea77-d025-489a-b6b6-9445118fba37, infoPort=43467, infoSecurePort=0, ipcPort=39613, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:50:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd71c0ff1026b0f33 with lease ID 0x330c1d9960302fae: Processing first storage report for DS-d309af73-5bae-421d-aea1-df395f0090ca from datanode DatanodeRegistration(127.0.0.1:33149, datanodeUuid=f396ea77-d025-489a-b6b6-9445118fba37, infoPort=43467, infoSecurePort=0, ipcPort=39613, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419) 2024-11-24T08:50:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd71c0ff1026b0f33 with lease ID 0x330c1d9960302fae: from storage DS-d309af73-5bae-421d-aea1-df395f0090ca node DatanodeRegistration(127.0.0.1:33149, datanodeUuid=f396ea77-d025-489a-b6b6-9445118fba37, infoPort=43467, infoSecurePort=0, ipcPort=39613, storageInfo=lv=-57;cid=testClusterID;nsid=1225191227;c=1732438245419), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:50:45,984 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561 2024-11-24T08:50:45,987 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/zookeeper_0, clientPort=55564, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:50:45,988 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55564 2024-11-24T08:50:45,989 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:45,991 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:46,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:50:46,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:50:46,015 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53 with version=8 2024-11-24T08:50:46,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase-staging 2024-11-24T08:50:46,017 INFO [Time-limited test {}] client.ConnectionUtils(128): master/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:50:46,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:50:46,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:50:46,017 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:50:46,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:50:46,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:50:46,017 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:50:46,017 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:50:46,021 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34461 2024-11-24T08:50:46,023 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34461 connecting to ZooKeeper ensemble=127.0.0.1:55564 2024-11-24T08:50:46,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:344610x0, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:50:46,036 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34461-0x10070ed1ed10000 connected 2024-11-24T08:50:46,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:46,065 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:46,068 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:50:46,068 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53, hbase.cluster.distributed=false 2024-11-24T08:50:46,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:50:46,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34461 2024-11-24T08:50:46,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34461 2024-11-24T08:50:46,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34461 2024-11-24T08:50:46,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34461 2024-11-24T08:50:46,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34461 2024-11-24T08:50:46,097 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:50:46,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:50:46,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:50:46,097 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:50:46,098 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:50:46,098 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:50:46,098 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:50:46,098 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:50:46,098 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37059 2024-11-24T08:50:46,100 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37059 connecting to ZooKeeper ensemble=127.0.0.1:55564 2024-11-24T08:50:46,101 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:46,103 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:46,107 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370590x0, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:50:46,108 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:370590x0, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:50:46,108 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37059-0x10070ed1ed10001 connected 2024-11-24T08:50:46,108 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:50:46,109 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:50:46,109 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:50:46,111 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:50:46,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37059 2024-11-24T08:50:46,117 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37059 2024-11-24T08:50:46,123 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37059 2024-11-24T08:50:46,125 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37059 2024-11-24T08:50:46,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37059 2024-11-24T08:50:46,143 
DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;469387a2cdb6:34461 2024-11-24T08:50:46,144 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/469387a2cdb6,34461,1732438246017 2024-11-24T08:50:46,145 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:50:46,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:50:46,149 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/469387a2cdb6,34461,1732438246017 2024-11-24T08:50:46,151 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:50:46,151 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,151 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:50:46,152 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/469387a2cdb6,34461,1732438246017 from backup master directory 2024-11-24T08:50:46,153 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:50:46,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/469387a2cdb6,34461,1732438246017 2024-11-24T08:50:46,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:50:46,154 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:50:46,154 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=469387a2cdb6,34461,1732438246017 2024-11-24T08:50:46,161 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/hbase.id] with ID: c39b69a0-d953-4928-a6e3-8581a57376bb 2024-11-24T08:50:46,161 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/.tmp/hbase.id 2024-11-24T08:50:46,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:50:46,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:50:46,179 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/.tmp/hbase.id]:[hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/hbase.id] 2024-11-24T08:50:46,197 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:46,198 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:50:46,199 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T08:50:46,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,201 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:50:46,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:50:46,211 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:50:46,212 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:50:46,212 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:50:46,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:50:46,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:50:46,236 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store 2024-11-24T08:50:46,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:50:46,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:50:46,252 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:50:46,252 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:50:46,252 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:50:46,252 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:50:46,252 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:50:46,252 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:50:46,252 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
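The 'master:store' descriptor logged above (families info, proc, rs and state with different VERSIONS, bloom filters and block sizes) is built internally by MasterRegion, but the same shape can be expressed with the public HBase client descriptor builders. The snippet below is only an illustration of two of those families with the settings quoted from the log; the table name is a stand-in and this is not the MasterRegion code path.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info' mirrors the logged settings: 3 versions, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // 'proc' (like 'rs' and 'state') uses the plainer settings from the log:
    // 1 version, ROW bloom filter, 64 KB blocks, no encoding.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store")) // stand-in name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();

    System.out.println(td);
  }
}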
2024-11-24T08:50:46,252 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438246252Disabling compacts and flushes for region at 1732438246252Disabling writes for close at 1732438246252Writing region close event to WAL at 1732438246252Closed at 1732438246252 2024-11-24T08:50:46,253 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/.initializing 2024-11-24T08:50:46,253 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/WALs/469387a2cdb6,34461,1732438246017 2024-11-24T08:50:46,256 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C34461%2C1732438246017, suffix=, logDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/WALs/469387a2cdb6,34461,1732438246017, archiveDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/oldWALs, maxLogs=10 2024-11-24T08:50:46,257 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C34461%2C1732438246017.1732438246256 2024-11-24T08:50:46,265 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/WALs/469387a2cdb6,34461,1732438246017/469387a2cdb6%2C34461%2C1732438246017.1732438246256 2024-11-24T08:50:46,271 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43467:43467),(127.0.0.1/127.0.0.1:46531:46531)] 2024-11-24T08:50:46,278 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:50:46,279 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:50:46,279 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,279 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:50:46,283 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:46,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:50:46,284 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:50:46,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:50:46,287 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:50:46,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:50:46,289 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:50:46,290 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,290 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,291 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,292 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,292 DEBUG [master/469387a2cdb6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,293 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:50:46,294 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:50:46,296 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:50:46,297 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803553, jitterRate=0.021771356463432312}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:50:46,298 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732438246279Initializing all the Stores at 1732438246280 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438246280Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438246280Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438246280Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438246280Cleaning up temporary data from old regions at 1732438246292 (+12 ms)Region opened successfully at 1732438246298 (+6 ms) 2024-11-24T08:50:46,298 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:50:46,302 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@273e503d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:50:46,303 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:50:46,303 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:50:46,303 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:50:46,303 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:50:46,304 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:50:46,304 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:50:46,304 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:50:46,306 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:50:46,307 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:50:46,308 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:50:46,309 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:50:46,310 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:50:46,311 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:50:46,311 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:50:46,312 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:50:46,313 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:50:46,314 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:50:46,314 DEBUG 
[master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:50:46,316 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:50:46,316 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:50:46,318 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:50:46,318 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:50:46,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,318 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=469387a2cdb6,34461,1732438246017, sessionid=0x10070ed1ed10000, setting cluster-up flag (Was=false) 2024-11-24T08:50:46,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,319 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,322 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:50:46,323 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,34461,1732438246017 2024-11-24T08:50:46,324 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,327 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:50:46,328 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,34461,1732438246017 2024-11-24T08:50:46,329 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:50:46,334 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:50:46,335 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:50:46,335 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:50:46,335 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 469387a2cdb6,34461,1732438246017 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:50:46,336 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:50:46,336 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:50:46,336 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:50:46,336 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:50:46,336 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/469387a2cdb6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:50:46,336 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,337 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:50:46,337 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:50:46,338 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:50:46,339 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:50:46,339 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732438276339 2024-11-24T08:50:46,339 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:50:46,339 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:50:46,339 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:50:46,339 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:50:46,339 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:50:46,339 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:50:46,340 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
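Several of the DEBUG lines above (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge, /hbase/snapshot-cleanup) are ZKUtil probing znodes that do not exist yet on a fresh cluster; the code treats the missing node as "use the default" rather than as an error, which is why the messages say "not necessarily an error". A bare-bones version of that check with the plain ZooKeeper client looks like the sketch below; the connect string and znode path are placeholders.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum; the test above runs ZooKeeper on 127.0.0.1:55564.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
    try {
      byte[] data = zk.getData("/hbase/balancer", false, null);
      System.out.println("balancer znode present, " + data.length + " bytes");
    } catch (KeeperException.NoNodeException absent) {
      // Same situation as the log: the znode simply has not been created yet,
      // so fall back to default behaviour instead of failing.
      System.out.println("no /hbase/balancer znode; using defaults");
    } finally {
      zk.close();
    }
  }
}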
2024-11-24T08:50:46,340 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,340 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:50:46,340 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:50:46,340 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:50:46,340 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:50:46,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:50:46,345 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:50:46,349 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438246345,5,FailOnTimeoutGroup] 2024-11-24T08:50:46,353 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438246349,5,FailOnTimeoutGroup] 2024-11-24T08:50:46,353 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,353 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:50:46,353 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,353 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:50:46,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:50:46,364 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:50:46,364 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53 2024-11-24T08:50:46,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:50:46,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:50:46,376 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:50:46,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:50:46,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:50:46,389 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:46,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:50:46,391 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:50:46,391 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:46,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:50:46,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:50:46,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:46,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:50:46,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:50:46,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:46,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:46,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:50:46,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740 2024-11-24T08:50:46,398 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740 2024-11-24T08:50:46,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:50:46,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:50:46,401 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:50:46,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:50:46,408 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:50:46,409 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816938, jitterRate=0.03879067301750183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:50:46,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732438246376Initializing all the Stores at 1732438246377 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438246377Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438246387 (+10 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438246387Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438246387Cleaning up temporary data from old regions at 1732438246401 (+14 ms)Region opened successfully at 1732438246409 (+8 ms) 2024-11-24T08:50:46,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:50:46,410 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:50:46,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:50:46,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:50:46,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:50:46,412 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:50:46,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438246410Disabling compacts and flushes for region at 
1732438246410Disabling writes for close at 1732438246410Writing region close event to WAL at 1732438246412 (+2 ms)Closed at 1732438246412 2024-11-24T08:50:46,413 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:50:46,413 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:50:46,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:50:46,415 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:50:46,416 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:50:46,428 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(746): ClusterId : c39b69a0-d953-4928-a6e3-8581a57376bb 2024-11-24T08:50:46,428 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:50:46,431 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:50:46,431 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:50:46,433 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:50:46,433 DEBUG [RS:0;469387a2cdb6:37059 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5548e3f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:50:46,449 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;469387a2cdb6:37059 2024-11-24T08:50:46,450 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:50:46,450 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:50:46,450 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(832): About to register with Master. 
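The two HRegion(1114) "Opened ..." entries above report desiredMaxFileSize=803553 with jitterRate=0.0218 and desiredMaxFileSize=816938 with jitterRate=0.0388. Both values are consistent with desired = configuredMaxFileSize * (1 + jitterRate) for a configured region max file size of 786432 bytes (768 KB) in this test run, i.e. the jitter is a small random spread applied to the split threshold. This is an inference from the two samples, not a quote from the split-policy source; the arithmetic check:

public class SplitJitterCheck {
  public static void main(String[] args) {
    long configuredMaxFileSize = 786_432L; // 768 KB, implied by both log samples
    double[] jitterRates = {0.021771356463432312, 0.03879067301750183};
    for (double jitterRate : jitterRates) {
      long desired = (long) (configuredMaxFileSize * (1.0 + jitterRate));
      // Prints 803553 and 816938, matching desiredMaxFileSize in the log above.
      System.out.println("jitterRate=" + jitterRate + " -> desiredMaxFileSize=" + desired);
    }
  }
}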
2024-11-24T08:50:46,451 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,34461,1732438246017 with port=37059, startcode=1732438246097 2024-11-24T08:50:46,451 DEBUG [RS:0;469387a2cdb6:37059 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:50:46,454 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34251, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:50:46,454 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,37059,1732438246097 2024-11-24T08:50:46,454 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,37059,1732438246097 2024-11-24T08:50:46,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:46,456 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53 2024-11-24T08:50:46,456 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45341 2024-11-24T08:50:46,456 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:50:46,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:50:46,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:46,458 DEBUG [RS:0;469387a2cdb6:37059 {}] zookeeper.ZKUtil(111): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/469387a2cdb6,37059,1732438246097 2024-11-24T08:50:46,458 WARN [RS:0;469387a2cdb6:37059 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
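The two Close-WAL-Writer-0 warnings above are lease-recovery attempts against the previous mini-cluster (port 36097) whose DFS client has already been shut down, hence the "java.io.IOException: Filesystem closed" root cause; they are teardown noise from the old cluster, not a problem with the new one on 45341. The failure mode itself is easy to reproduce against any HDFS endpoint: once a FileSystem instance is closed, subsequent calls fail in DFSClient.checkOpen. A deliberately broken sketch (URI and path are placeholders):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode; any reachable HDFS URI shows the same behaviour.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

    fs.close(); // e.g. another component shutting the shared client down

    // Any further call goes through DFSClient.checkOpen and throws
    // java.io.IOException: Filesystem closed -- the root cause in the log above.
    fs.exists(new Path("/user/jenkins/some-wal"));
  }
}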
2024-11-24T08:50:46,458 INFO [RS:0;469387a2cdb6:37059 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:50:46,458 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097 2024-11-24T08:50:46,462 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,37059,1732438246097] 2024-11-24T08:50:46,467 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:50:46,469 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:50:46,469 INFO [RS:0;469387a2cdb6:37059 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:50:46,469 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,473 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:50:46,474 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:50:46,474 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,474 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,474 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,474 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:50:46,475 DEBUG [RS:0;469387a2cdb6:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:50:46,477 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,477 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,477 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,477 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,478 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,478 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37059,1732438246097-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:50:46,499 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:50:46,499 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37059,1732438246097-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,499 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:46,499 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.Replication(171): 469387a2cdb6,37059,1732438246097 started 2024-11-24T08:50:46,520 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:50:46,520 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,37059,1732438246097, RpcServer on 469387a2cdb6/172.17.0.2:37059, sessionid=0x10070ed1ed10001 2024-11-24T08:50:46,520 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:50:46,520 DEBUG [RS:0;469387a2cdb6:37059 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,37059,1732438246097 2024-11-24T08:50:46,520 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,37059,1732438246097' 2024-11-24T08:50:46,520 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:50:46,521 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:50:46,521 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:50:46,521 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:50:46,521 DEBUG [RS:0;469387a2cdb6:37059 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 469387a2cdb6,37059,1732438246097 2024-11-24T08:50:46,521 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,37059,1732438246097' 2024-11-24T08:50:46,521 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:50:46,522 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:50:46,522 DEBUG [RS:0;469387a2cdb6:37059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:50:46,522 INFO [RS:0;469387a2cdb6:37059 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:50:46,522 INFO [RS:0;469387a2cdb6:37059 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:50:46,566 WARN [469387a2cdb6:34461 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-24T08:50:46,624 INFO [RS:0;469387a2cdb6:37059 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C37059%2C1732438246097, suffix=, logDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097, archiveDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/oldWALs, maxLogs=32 2024-11-24T08:50:46,625 INFO [RS:0;469387a2cdb6:37059 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C37059%2C1732438246097.1732438246625 2024-11-24T08:50:46,667 INFO [RS:0;469387a2cdb6:37059 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438246625 2024-11-24T08:50:46,668 DEBUG [RS:0;469387a2cdb6:37059 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46531:46531),(127.0.0.1/127.0.0.1:43467:43467)] 2024-11-24T08:50:46,816 DEBUG [469387a2cdb6:34461 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:50:46,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:50:46,819 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,37059,1732438246097, state=OPENING 2024-11-24T08:50:46,821 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:50:46,822 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:50:46,823 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:50:46,823 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:50:46,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,37059,1732438246097}] 2024-11-24T08:50:46,823 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:50:46,980 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:50:46,985 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54941, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:50:46,989 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:50:46,990 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:50:46,992 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C37059%2C1732438246097.meta, suffix=.meta, logDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097, archiveDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/oldWALs, maxLogs=32 2024-11-24T08:50:46,992 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C37059%2C1732438246097.meta.1732438246992.meta 2024-11-24T08:50:46,999 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.meta.1732438246992.meta 2024-11-24T08:50:47,000 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46531:46531),(127.0.0.1/127.0.0.1:43467:43467)] 2024-11-24T08:50:47,001 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:50:47,001 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:50:47,001 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:50:47,001 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
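The AbstractFSWAL(613) lines above report the rolling parameters the new WALs were created with (blocksize=256 MB, rollsize=128 MB, maxLogs=32). A hedged configuration sketch of the standard WAL-rolling settings that govern those numbers follows; the property names are the usual HBase ones, and whether this test sets them explicitly or inherits them from mini-cluster defaults is not visible in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollingConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the numbers printed by AbstractFSWAL(613) above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize = 256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * 0.5 = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs = 32
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("rollsize=" + rollSize);
      }
    }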
2024-11-24T08:50:47,001 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:50:47,002 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:50:47,002 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:50:47,002 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:50:47,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:50:47,004 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:50:47,004 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:47,004 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:47,004 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:50:47,005 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:50:47,005 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:47,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:47,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:50:47,006 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:50:47,006 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:47,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:50:47,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:50:47,006 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:50:47,007 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:47,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:50:47,007 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:50:47,008 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740 2024-11-24T08:50:47,008 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740 2024-11-24T08:50:47,009 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:50:47,009 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:50:47,010 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:50:47,011 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:50:47,012 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817092, jitterRate=0.03898720443248749}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:50:47,012 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:50:47,012 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732438247002Writing region info on filesystem at 1732438247002Initializing all the Stores at 1732438247003 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438247003Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438247003Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438247003Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438247003Cleaning up temporary data from old regions at 1732438247009 (+6 ms)Running coprocessor post-open hooks at 1732438247012 (+3 ms)Region opened successfully at 1732438247012 2024-11-24T08:50:47,013 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732438246979 2024-11-24T08:50:47,016 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:50:47,016 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:50:47,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:50:47,018 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,37059,1732438246097, state=OPEN 2024-11-24T08:50:47,020 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:50:47,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:50:47,020 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=469387a2cdb6,37059,1732438246097 2024-11-24T08:50:47,020 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:50:47,020 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:50:47,023 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:50:47,023 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,37059,1732438246097 in 197 msec 2024-11-24T08:50:47,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:50:47,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-11-24T08:50:47,026 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:50:47,026 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:50:47,027 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:50:47,027 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,37059,1732438246097, seqNum=-1] 2024-11-24T08:50:47,027 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:50:47,028 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44749, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:50:47,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 699 msec 2024-11-24T08:50:47,034 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732438247034, completionTime=-1 2024-11-24T08:50:47,034 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:50:47,034 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:50:47,035 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732438307035 2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732438367036 2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,34461,1732438246017-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,34461,1732438246017-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,34461,1732438246017-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-469387a2cdb6:34461, period=300000, unit=MILLISECONDS is enabled. 
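InitMetaProcedure(114) above announces creation of the 'default' and 'hbase' namespaces as the last step of meta initialization. A small, hypothetical client-side check against the mini cluster (not part of the test itself) that would confirm both namespaces exist once the master reports initialization complete:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml (or equivalent) pointing at the running cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // After InitMetaProcedure finishes, 'default' and 'hbase' should both be listed.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }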
2024-11-24T08:50:47,036 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:47,037 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:50:47,038 DEBUG [master/469387a2cdb6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.886sec 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,34461,1732438246017-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:50:47,040 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,34461,1732438246017-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:50:47,042 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:50:47,042 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:50:47,042 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,34461,1732438246017-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:50:47,128 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f95ec8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:50:47,128 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 469387a2cdb6,34461,-1 for getting cluster id 2024-11-24T08:50:47,128 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:50:47,129 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c39b69a0-d953-4928-a6e3-8581a57376bb' 2024-11-24T08:50:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:50:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c39b69a0-d953-4928-a6e3-8581a57376bb" 2024-11-24T08:50:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@89cdc3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:50:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [469387a2cdb6,34461,-1] 2024-11-24T08:50:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:50:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:50:47,131 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34900, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:50:47,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@221887c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:50:47,132 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:50:47,133 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,37059,1732438246097, seqNum=-1] 2024-11-24T08:50:47,133 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:50:47,134 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:50:47,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=469387a2cdb6,34461,1732438246017 2024-11-24T08:50:47,136 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:50:47,138 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:50:47,138 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:50:47,139 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 469387a2cdb6,34461,1732438246017 2024-11-24T08:50:47,139 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5ecc3b76 2024-11-24T08:50:47,139 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:50:47,140 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:50:47,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:50:47,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T08:50:47,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:50:47,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-24T08:50:47,143 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:50:47,143 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:47,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-24T08:50:47,144 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:50:47,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:50:47,150 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741835_1011 (size=381) 2024-11-24T08:50:47,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741835_1011 (size=381) 2024-11-24T08:50:47,154 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 778da2ad8102f000e6527ce33247b88f, NAME => 'TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53 2024-11-24T08:50:47,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741836_1012 (size=64) 2024-11-24T08:50:47,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741836_1012 (size=64) 2024-11-24T08:50:47,161 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:50:47,161 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 778da2ad8102f000e6527ce33247b88f, disabling compactions & flushes 2024-11-24T08:50:47,161 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:50:47,161 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:50:47,161 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. after waiting 0 ms 2024-11-24T08:50:47,161 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:50:47,161 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 
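The HMaster$4(2454) entry above records the client's create request for 'TestLogRolling-testLogRolling' with a single 'info' family, and TableDescriptorChecker(321) warns that the descriptor's MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are unusually small. An equivalent Admin-API sketch in Java is below; the test's actual descriptor-building code is not shown in the log, so this is an illustration of where those values would be set:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateLogRollingTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                  // Single 'info' family, one version; ROW bloom filter is the default and
                  // matches the descriptor printed in the log.
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)
                      .build())
                  // Deliberately tiny limits; these are what TableDescriptorChecker warns about.
                  .setMaxFileSize(786432)        // MAX_FILESIZE = 768 KB
                  .setMemStoreFlushSize(8192);   // MEMSTORE_FLUSHSIZE = 8 KB
          admin.createTable(table.build());
        }
      }
    }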
2024-11-24T08:50:47,162 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 778da2ad8102f000e6527ce33247b88f: Waiting for close lock at 1732438247161Disabling compacts and flushes for region at 1732438247161Disabling writes for close at 1732438247161Writing region close event to WAL at 1732438247161Closed at 1732438247161 2024-11-24T08:50:47,163 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:50:47,163 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732438247163"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438247163"}]},"ts":"1732438247163"} 2024-11-24T08:50:47,166 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T08:50:47,167 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:50:47,167 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438247167"}]},"ts":"1732438247167"} 2024-11-24T08:50:47,169 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-24T08:50:47,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, ASSIGN}] 2024-11-24T08:50:47,171 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, ASSIGN 2024-11-24T08:50:47,172 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, ASSIGN; state=OFFLINE, location=469387a2cdb6,37059,1732438246097; forceNewPlan=false, retain=false 2024-11-24T08:50:47,323 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=778da2ad8102f000e6527ce33247b88f, regionState=OPENING, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:50:47,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, ASSIGN because future has completed 2024-11-24T08:50:47,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 778da2ad8102f000e6527ce33247b88f, 
server=469387a2cdb6,37059,1732438246097}] 2024-11-24T08:50:47,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:47,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:47,488 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:50:47,488 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 778da2ad8102f000e6527ce33247b88f, NAME => 'TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:50:47,489 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,489 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:50:47,489 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,489 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,491 INFO [StoreOpener-778da2ad8102f000e6527ce33247b88f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,493 INFO [StoreOpener-778da2ad8102f000e6527ce33247b88f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 778da2ad8102f000e6527ce33247b88f columnFamilyName info 2024-11-24T08:50:47,493 DEBUG [StoreOpener-778da2ad8102f000e6527ce33247b88f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:50:47,494 INFO [StoreOpener-778da2ad8102f000e6527ce33247b88f-1 {}] regionserver.HStore(327): Store=778da2ad8102f000e6527ce33247b88f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:50:47,494 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,495 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,495 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,496 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,496 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,498 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,500 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:50:47,501 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 778da2ad8102f000e6527ce33247b88f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758037, jitterRate=-0.03610694408416748}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:50:47,501 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:47,501 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 778da2ad8102f000e6527ce33247b88f: Running coprocessor pre-open hook at 
1732438247490Writing region info on filesystem at 1732438247490Initializing all the Stores at 1732438247491 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438247491Cleaning up temporary data from old regions at 1732438247496 (+5 ms)Running coprocessor post-open hooks at 1732438247501 (+5 ms)Region opened successfully at 1732438247501 2024-11-24T08:50:47,502 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., pid=6, masterSystemTime=1732438247479 2024-11-24T08:50:47,504 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:50:47,504 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:50:47,505 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=778da2ad8102f000e6527ce33247b88f, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:50:47,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 because future has completed 2024-11-24T08:50:47,510 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:50:47,510 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 in 182 msec 2024-11-24T08:50:47,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:50:47,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, ASSIGN in 341 msec 2024-11-24T08:50:47,513 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:50:47,513 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732438247513"}]},"ts":"1732438247513"} 2024-11-24T08:50:47,515 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-24T08:50:47,516 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:50:47,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 375 msec 2024-11-24T08:50:48,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:48,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:49,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:49,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:49,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:49,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,160 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:50:50,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:50:50,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:50,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:51,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:51,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:52,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:52,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:52,467 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:50:52,469 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-24T08:50:53,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:53,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:54,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:54,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:50:55,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:50:55,318 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:50:55,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:50:55,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:50:55,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T08:50:55,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T08:50:55,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T08:50:55,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T08:50:55,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:55,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:56,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:56,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:57,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:50:57,188 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-24T08:50:57,188 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-24T08:50:57,190 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-24T08:50:57,191 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:50:57,193 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2] 2024-11-24T08:50:57,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:50:57,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:50:57,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/f011275188ce4182bfa57a8c34b8bd58 is 1080, key is row0001/info:/1732438257194/Put/seqid=0 2024-11-24T08:50:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741837_1013 (size=12509) 2024-11-24T08:50:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741837_1013 (size=12509) 2024-11-24T08:50:57,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/f011275188ce4182bfa57a8c34b8bd58 2024-11-24T08:50:57,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/f011275188ce4182bfa57a8c34b8bd58 as 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/f011275188ce4182bfa57a8c34b8bd58 2024-11-24T08:50:57,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T08:50:57,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37692 deadline: 1732438267258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 2024-11-24T08:50:57,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/f011275188ce4182bfa57a8c34b8bd58, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T08:50:57,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 778da2ad8102f000e6527ce33247b88f in 56ms, sequenceid=11, compaction requested=false 2024-11-24T08:50:57,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:50:57,265 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:50:57,265 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:50:57,265 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2 because the exception is null or not the one we care about 2024-11-24T08:50:57,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:50:57,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:07,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:07,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-24T08:51:07,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/56664ec135124b66ad6548ae93715f3c is 1080, key is row0008/info:/1732438257209/Put/seqid=0 2024-11-24T08:51:07,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741838_1014 (size=29761) 2024-11-24T08:51:07,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741838_1014 (size=29761) 2024-11-24T08:51:07,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:07,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:07,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/56664ec135124b66ad6548ae93715f3c 2024-11-24T08:51:07,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/56664ec135124b66ad6548ae93715f3c as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c 2024-11-24T08:51:07,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c, entries=23, sequenceid=37, filesize=29.1 K 2024-11-24T08:51:07,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 778da2ad8102f000e6527ce33247b88f in 438ms, sequenceid=37, compaction requested=false 2024-11-24T08:51:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-24T08:51:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c because midkey is the same as first or last row 2024-11-24T08:51:08,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:08,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:09,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:09,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:51:09,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/779d4020c32a405fa13a8afd7712f931 is 1080, key is row0031/info:/1732438267319/Put/seqid=0 2024-11-24T08:51:09,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741839_1015 (size=12509) 2024-11-24T08:51:09,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741839_1015 (size=12509) 2024-11-24T08:51:09,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/779d4020c32a405fa13a8afd7712f931 2024-11-24T08:51:09,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/779d4020c32a405fa13a8afd7712f931 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/779d4020c32a405fa13a8afd7712f931 2024-11-24T08:51:09,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/779d4020c32a405fa13a8afd7712f931, entries=7, sequenceid=47, filesize=12.2 K 2024-11-24T08:51:09,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 778da2ad8102f000e6527ce33247b88f in 35ms, sequenceid=47, compaction requested=true 2024-11-24T08:51:09,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:09,372 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-24T08:51:09,372 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:09,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:09,372 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c because midkey is the same as first or last row 2024-11-24T08:51:09,372 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 778da2ad8102f000e6527ce33247b88f:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:09,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:09,372 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:09,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-24T08:51:09,374 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:51:09,374 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): 778da2ad8102f000e6527ce33247b88f/info is initiating minor compaction (all files) 2024-11-24T08:51:09,374 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 778da2ad8102f000e6527ce33247b88f/info in TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:51:09,374 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/f011275188ce4182bfa57a8c34b8bd58, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/779d4020c32a405fa13a8afd7712f931] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp, totalSize=53.5 K 2024-11-24T08:51:09,375 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting f011275188ce4182bfa57a8c34b8bd58, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732438257194 2024-11-24T08:51:09,375 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 56664ec135124b66ad6548ae93715f3c, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732438257209 2024-11-24T08:51:09,376 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 779d4020c32a405fa13a8afd7712f931, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732438267319 2024-11-24T08:51:09,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/454bdbc869f4490d8b327d75e9518118 is 1080, key is row0038/info:/1732438269337/Put/seqid=0 
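[Editor's note, illustrative only] The compaction lines above show the exploring policy picking 3 store files totalling 54,779 bytes (12,509 + 29,761 + 12,509) for a minor compaction. The sketch below is a minimal, hypothetical Java rendering of that kind of size-ratio selection, not the actual HBase ExploringCompactionPolicy: it scans contiguous windows of file sizes and accepts a window only if no single file exceeds ratio × (sum of the rest). The class and method names, the 1.2 ratio, and the min/max window limits are assumptions; with the sizes taken from this log, all three files qualify, which is consistent with the "selected 3 files of size 54779" line.

```java
// Hedged sketch: simplified size-ratio window selection in the spirit of the
// "Exploring compaction algorithm has selected 3 files of size 54779" line above.
// NOT the real HBase policy; names, ratio and limits are assumptions.
import java.util.ArrayList;
import java.util.List;

public class CompactionSelectionSketch {

    /** Return the contiguous window of store-file sizes with the largest total
     *  in which every file passes a simple ratio check against the rest of the window. */
    static List<Long> selectFiles(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
        List<Long> best = new ArrayList<>();
        long bestTotal = 0;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles - 1; end < sizes.size() && end < start + maxFiles; end++) {
                List<Long> window = sizes.subList(start, end + 1);
                long total = window.stream().mapToLong(Long::longValue).sum();
                boolean ok = true;
                for (long s : window) {
                    // ratio check: no single file may dwarf the rest of the window
                    if (s > ratio * (total - s)) { ok = false; break; }
                }
                if (ok && total > bestTotal) {
                    bestTotal = total;
                    best = new ArrayList<>(window);
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Sizes (bytes) echo the three candidates logged above:
        // f0112751... (12509), 56664ec1... (29761), 779d4020... (12509) -> total 54779.
        List<Long> storeFileSizes = List.of(12509L, 29761L, 12509L);
        // Ratio 1.2 and window limits 2..10 are assumed, not read from the log.
        System.out.println(selectFiles(storeFileSizes, 1.2, 2, 10)); // [12509, 29761, 12509]
    }
}
```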
2024-11-24T08:51:09,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741840_1016 (size=20064) 2024-11-24T08:51:09,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741840_1016 (size=20064) 2024-11-24T08:51:09,386 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/454bdbc869f4490d8b327d75e9518118 2024-11-24T08:51:09,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/454bdbc869f4490d8b327d75e9518118 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/454bdbc869f4490d8b327d75e9518118 2024-11-24T08:51:09,396 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 778da2ad8102f000e6527ce33247b88f#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:09,397 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/0d5051bbfd9d4196854865d44184afb0 is 1080, key is row0001/info:/1732438257194/Put/seqid=0 2024-11-24T08:51:09,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/454bdbc869f4490d8b327d75e9518118, entries=14, sequenceid=64, filesize=19.6 K 2024-11-24T08:51:09,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for 778da2ad8102f000e6527ce33247b88f in 30ms, sequenceid=64, compaction requested=false 2024-11-24T08:51:09,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-24T08:51:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c because midkey is the same as first or last row 2024-11-24T08:51:09,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:09,403 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T08:51:09,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/8bdb53885c734d92b56f42c3c4de73f4 is 1080, key is row0052/info:/1732438269374/Put/seqid=0 2024-11-24T08:51:09,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741841_1017 (size=44978) 2024-11-24T08:51:09,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741842_1018 (size=16817) 2024-11-24T08:51:09,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741842_1018 (size=16817) 2024-11-24T08:51:09,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741841_1017 (size=44978) 2024-11-24T08:51:09,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/8bdb53885c734d92b56f42c3c4de73f4 2024-11-24T08:51:09,429 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/0d5051bbfd9d4196854865d44184afb0 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 2024-11-24T08:51:09,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/8bdb53885c734d92b56f42c3c4de73f4 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/8bdb53885c734d92b56f42c3c4de73f4 2024-11-24T08:51:09,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/8bdb53885c734d92b56f42c3c4de73f4, entries=11, sequenceid=78, filesize=16.4 K 2024-11-24T08:51:09,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for 778da2ad8102f000e6527ce33247b88f in 34ms, sequenceid=78, compaction requested=false 2024-11-24T08:51:09,437 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 778da2ad8102f000e6527ce33247b88f/info of 778da2ad8102f000e6527ce33247b88f into 0d5051bbfd9d4196854865d44184afb0(size=43.9 K), total size for store is 79.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:09,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:09,437 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:09,437 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., storeName=778da2ad8102f000e6527ce33247b88f/info, priority=13, startTime=1732438269372; duration=0sec 2024-11-24T08:51:09,437 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.9 K, sizeToCheck=16.0 K 2024-11-24T08:51:09,437 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:09,437 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.9 K, sizeToCheck=16.0 K 2024-11-24T08:51:09,437 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:09,437 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 because midkey is the same as first or last row 2024-11-24T08:51:09,437 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 because midkey is the same as first or last row 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.9 K, sizeToCheck=16.0 K 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 because midkey is the same as first or last row 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.9 K, sizeToCheck=16.0 K 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 because midkey is the 
same as first or last row 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:09,438 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 778da2ad8102f000e6527ce33247b88f:info 2024-11-24T08:51:09,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:09,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:10,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:10,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:51:11,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/5f1d5bd6f5c1498881a19b6bc9dbb555 is 1080, key is row0063/info:/1732438269405/Put/seqid=0 2024-11-24T08:51:11,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741843_1019 (size=12509) 2024-11-24T08:51:11,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741843_1019 (size=12509) 2024-11-24T08:51:11,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/5f1d5bd6f5c1498881a19b6bc9dbb555 2024-11-24T08:51:11,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/5f1d5bd6f5c1498881a19b6bc9dbb555 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/5f1d5bd6f5c1498881a19b6bc9dbb555 2024-11-24T08:51:11,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/5f1d5bd6f5c1498881a19b6bc9dbb555, entries=7, sequenceid=89, filesize=12.2 K 2024-11-24T08:51:11,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 778da2ad8102f000e6527ce33247b88f in 23ms, sequenceid=89, compaction requested=true 2024-11-24T08:51:11,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:11,447 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,447 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,447 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 because midkey is the same as first or last row 2024-11-24T08:51:11,447 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 778da2ad8102f000e6527ce33247b88f:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:11,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:11,447 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-24T08:51:11,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T08:51:11,448 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 94368 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-24T08:51:11,448 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): 778da2ad8102f000e6527ce33247b88f/info is initiating minor compaction (all files) 2024-11-24T08:51:11,448 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 778da2ad8102f000e6527ce33247b88f/info in TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:51:11,449 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/454bdbc869f4490d8b327d75e9518118, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/8bdb53885c734d92b56f42c3c4de73f4, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/5f1d5bd6f5c1498881a19b6bc9dbb555] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp, totalSize=92.2 K 2024-11-24T08:51:11,449 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0d5051bbfd9d4196854865d44184afb0, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732438257194 2024-11-24T08:51:11,449 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 454bdbc869f4490d8b327d75e9518118, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732438269337 2024-11-24T08:51:11,450 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8bdb53885c734d92b56f42c3c4de73f4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732438269374 2024-11-24T08:51:11,450 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f1d5bd6f5c1498881a19b6bc9dbb555, keycount=7, 
bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732438269405 2024-11-24T08:51:11,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/d04aa409f1a54d419652ba9cd9039cf1 is 1080, key is row0070/info:/1732438271425/Put/seqid=0 2024-11-24T08:51:11,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741844_1020 (size=17894) 2024-11-24T08:51:11,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741844_1020 (size=17894) 2024-11-24T08:51:11,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/d04aa409f1a54d419652ba9cd9039cf1 2024-11-24T08:51:11,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/d04aa409f1a54d419652ba9cd9039cf1 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/d04aa409f1a54d419652ba9cd9039cf1 2024-11-24T08:51:11,467 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 778da2ad8102f000e6527ce33247b88f#info#compaction#62 average throughput is 17.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:11,467 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/84b8ddb34028407a9729ff723a5c2847 is 1080, key is row0001/info:/1732438257194/Put/seqid=0 2024-11-24T08:51:11,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/d04aa409f1a54d419652ba9cd9039cf1, entries=12, sequenceid=104, filesize=17.5 K 2024-11-24T08:51:11,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=9.46 KB/9684 for 778da2ad8102f000e6527ce33247b88f in 24ms, sequenceid=104, compaction requested=false 2024-11-24T08:51:11,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:11,471 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.6 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,471 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,471 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 because midkey is the same as first or last row 2024-11-24T08:51:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741845_1021 (size=79720) 2024-11-24T08:51:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741845_1021 (size=79720) 2024-11-24T08:51:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T08:51:11,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/c248268c80f04d76a87c7fc4f85bc13b is 1080, key is row0082/info:/1732438271448/Put/seqid=0 2024-11-24T08:51:11,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:11,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:11,480 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/84b8ddb34028407a9729ff723a5c2847 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847 2024-11-24T08:51:11,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741846_1022 (size=16817) 2024-11-24T08:51:11,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741846_1022 (size=16817) 2024-11-24T08:51:11,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/c248268c80f04d76a87c7fc4f85bc13b 2024-11-24T08:51:11,488 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 778da2ad8102f000e6527ce33247b88f/info of 778da2ad8102f000e6527ce33247b88f into 84b8ddb34028407a9729ff723a5c2847(size=77.9 K), total size for store is 95.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
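[Editor's note, illustrative only] The split-policy DEBUG lines in this stretch keep reporting two decisions: the region's summed store-file size exceeds the check size ("Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K"), yet no split happens while the candidate midkey equals the first or last row ("cannot split ... because midkey is the same as first or last row"); once a usable split point exists, a split request with splitKey=row0062 is filed. The sketch below writes out those two checks as plain Java. It is a hedged simplification, not the real ConstantSizeRegionSplitPolicy or StoreUtils code; the class, method names, and sample values are assumptions chosen to mirror keys and sizes visible in the log.

```java
// Hedged sketch of the two split-policy checks reported in the DEBUG lines above.
// NOT the actual HBase split-policy code; names and values are illustrative.
import java.util.Optional;

public class SplitDecisionSketch {

    /** "Should split because region size is big enough sumSize=..., sizeToCheck=..." */
    static boolean regionBigEnough(long sumStoreFileBytes, long sizeToCheckBytes) {
        return sumStoreFileBytes > sizeToCheckBytes;
    }

    /** "cannot split ... because midkey is the same as first or last row":
     *  a usable split point must fall strictly between the first and last keys. */
    static Optional<String> splitPoint(String firstKey, String midKey, String lastKey) {
        if (midKey == null || midKey.equals(firstKey) || midKey.equals(lastKey)) {
            return Optional.empty();
        }
        return Optional.of(midKey);
    }

    public static void main(String[] args) {
        // ~95.3 KB of store files vs. a 16 KB check size, as in the log (values approximate).
        System.out.println(regionBigEnough(97_600L, 16_384L)); // true

        // Keys echo rows seen in the log (row0001 .. row0093); row0062 is the logged splitKey.
        System.out.println(splitPoint("row0001", "row0062", "row0093")); // Optional[row0062]
        System.out.println(splitPoint("row0001", "row0001", "row0093")); // Optional.empty -> "cannot split"
    }
}
```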
2024-11-24T08:51:11,488 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:11,488 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., storeName=778da2ad8102f000e6527ce33247b88f/info, priority=12, startTime=1732438271447; duration=0sec 2024-11-24T08:51:11,488 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,488 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,488 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,488 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,489 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,489 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,490 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:11,490 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:11,490 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 778da2ad8102f000e6527ce33247b88f:info 2024-11-24T08:51:11,491 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] assignment.AssignmentManager(1363): Split request from 469387a2cdb6,37059,1732438246097, parent={ENCODED => 778da2ad8102f000e6527ce33247b88f, NAME => 'TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-24T08:51:11,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/c248268c80f04d76a87c7fc4f85bc13b as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/c248268c80f04d76a87c7fc4f85bc13b 2024-11-24T08:51:11,497 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:11,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/c248268c80f04d76a87c7fc4f85bc13b, entries=11, sequenceid=118, filesize=16.4 K 2024-11-24T08:51:11,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=4.20 KB/4304 for 778da2ad8102f000e6527ce33247b88f in 26ms, sequenceid=118, compaction requested=true 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 778da2ad8102f000e6527ce33247b88f: 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:51:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-24T08:51:11,501 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=74de31bc5c2e2d20fe1a281b677a200f, daughterB=bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:11,502 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=74de31bc5c2e2d20fe1a281b677a200f, daughterB=bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:11,503 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=74de31bc5c2e2d20fe1a281b677a200f, daughterB=bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:11,503 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=74de31bc5c2e2d20fe1a281b677a200f, daughterB=bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:11,504 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] assignment.AssignmentManager(1363): Split request from 469387a2cdb6,37059,1732438246097, parent={ENCODED => 
778da2ad8102f000e6527ce33247b88f, NAME => 'TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-24T08:51:11,505 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:11,506 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=bc9cc8490a391834b4f9a6e12d64adff, daughterB=1832ad52cf8d02ca8792d131f64219ae 2024-11-24T08:51:11,506 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=bc9cc8490a391834b4f9a6e12d64adff, daughterB=1832ad52cf8d02ca8792d131f64219ae held by pid=7 2024-11-24T08:51:11,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, UNASSIGN}] 2024-11-24T08:51:11,513 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-24T08:51:11,513 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, UNASSIGN 2024-11-24T08:51:11,513 DEBUG [PEWorker-4 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=bc9cc8490a391834b4f9a6e12d64adff, daughterB=1832ad52cf8d02ca8792d131f64219ae 2024-11-24T08:51:11,515 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=778da2ad8102f000e6527ce33247b88f, regionState=CLOSING, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:11,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, UNASSIGN because future has completed 2024-11-24T08:51:11,517 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-24T08:51:11,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 778da2ad8102f000e6527ce33247b88f, 
server=469387a2cdb6,37059,1732438246097}] 2024-11-24T08:51:11,674 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,674 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-24T08:51:11,675 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 778da2ad8102f000e6527ce33247b88f, disabling compactions & flushes 2024-11-24T08:51:11,675 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:51:11,675 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:51:11,675 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. after waiting 0 ms 2024-11-24T08:51:11,675 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:51:11,675 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 778da2ad8102f000e6527ce33247b88f 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-24T08:51:11,680 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/fce9998727864933865500e725c4420d is 1080, key is row0093/info:/1732438271475/Put/seqid=0 2024-11-24T08:51:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741847_1023 (size=9270) 2024-11-24T08:51:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741847_1023 (size=9270) 2024-11-24T08:51:11,686 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/fce9998727864933865500e725c4420d 2024-11-24T08:51:11,692 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/.tmp/info/fce9998727864933865500e725c4420d as 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/fce9998727864933865500e725c4420d 2024-11-24T08:51:11,698 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/fce9998727864933865500e725c4420d, entries=4, sequenceid=126, filesize=9.1 K 2024-11-24T08:51:11,699 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 778da2ad8102f000e6527ce33247b88f in 24ms, sequenceid=126, compaction requested=true 2024-11-24T08:51:11,700 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/f011275188ce4182bfa57a8c34b8bd58, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/779d4020c32a405fa13a8afd7712f931, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/454bdbc869f4490d8b327d75e9518118, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/8bdb53885c734d92b56f42c3c4de73f4, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/5f1d5bd6f5c1498881a19b6bc9dbb555] to archive 2024-11-24T08:51:11,701 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
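The repeated "Should split because region size is big enough sumSize=…, sizeToCheck=16.0 K" / "regionsWithCommonTable=1" pairs above are the split-policy check firing after each flush and compaction: the region is split once the total size of its store files exceeds the smaller of the configured max file size and initialSize times the cube of the number of regions of this table on the server. A minimal Java sketch of that arithmetic, using the values this log reports (initialSize=16384, and desiredMaxFileSize=716942 from the policy dump further down); it only illustrates the check and is not the HBase policy class.

```java
// Sketch of the split-size check implied by the log above (not the HBase class itself):
// a region splits once the total size of its store files exceeds
//   min(desiredMaxFileSize, initialSize * regionCount^3)
// where regionCount is the number of regions of the same table on this server.
// Constants below are the ones this log reports (initialSize=16384, desiredMaxFileSize=716942).
public final class SplitCheckSketch {

    static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
        if (regionsWithCommonTable <= 0) {
            return desiredMaxFileSize;
        }
        long n = regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, initialSize * n * n * n);
    }

    public static void main(String[] args) {
        long sumSize = 97_587;                              // ~95.3 K of store files, as logged (assumed bytes)
        long threshold = sizeToCheck(16_384, 716_942, 1);   // regionsWithCommonTable=1 in this log
        System.out.printf("sumSize=%d sizeToCheck=%d shouldSplit=%b%n",
            sumSize, threshold, sumSize > threshold);
    }
}
```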
2024-11-24T08:51:11,703 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/f011275188ce4182bfa57a8c34b8bd58 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/f011275188ce4182bfa57a8c34b8bd58 2024-11-24T08:51:11,704 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/56664ec135124b66ad6548ae93715f3c 2024-11-24T08:51:11,705 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/0d5051bbfd9d4196854865d44184afb0 2024-11-24T08:51:11,706 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/779d4020c32a405fa13a8afd7712f931 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/779d4020c32a405fa13a8afd7712f931 2024-11-24T08:51:11,707 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/454bdbc869f4490d8b327d75e9518118 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/454bdbc869f4490d8b327d75e9518118 2024-11-24T08:51:11,708 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/8bdb53885c734d92b56f42c3c4de73f4 to 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/8bdb53885c734d92b56f42c3c4de73f4 2024-11-24T08:51:11,709 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/5f1d5bd6f5c1498881a19b6bc9dbb555 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/5f1d5bd6f5c1498881a19b6bc9dbb555 2024-11-24T08:51:11,715 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=1 2024-11-24T08:51:11,716 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 2024-11-24T08:51:11,716 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 778da2ad8102f000e6527ce33247b88f: Waiting for close lock at 1732438271675Running coprocessor pre-close hooks at 1732438271675Disabling compacts and flushes for region at 1732438271675Disabling writes for close at 1732438271675Obtaining lock to block concurrent updates at 1732438271675Preparing flush snapshotting stores in 778da2ad8102f000e6527ce33247b88f at 1732438271675Finished memstore snapshotting TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., syncing WAL and waiting on mvcc, flushsize=dataSize=4304, getHeapSize=4848, getOffHeapSize=0, getCellsCount=4 at 1732438271676 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 
at 1732438271676Flushing 778da2ad8102f000e6527ce33247b88f/info: creating writer at 1732438271676Flushing 778da2ad8102f000e6527ce33247b88f/info: appending metadata at 1732438271680 (+4 ms)Flushing 778da2ad8102f000e6527ce33247b88f/info: closing flushed file at 1732438271680Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fe58a6d: reopening flushed file at 1732438271691 (+11 ms)Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 778da2ad8102f000e6527ce33247b88f in 24ms, sequenceid=126, compaction requested=true at 1732438271699 (+8 ms)Writing region close event to WAL at 1732438271712 (+13 ms)Running coprocessor post-close hooks at 1732438271716 (+4 ms)Closed at 1732438271716 2024-11-24T08:51:11,719 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,719 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=778da2ad8102f000e6527ce33247b88f, regionState=CLOSED 2024-11-24T08:51:11,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 because future has completed 2024-11-24T08:51:11,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-24T08:51:11,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 778da2ad8102f000e6527ce33247b88f, server=469387a2cdb6,37059,1732438246097 in 205 msec 2024-11-24T08:51:11,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-24T08:51:11,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=778da2ad8102f000e6527ce33247b88f, UNASSIGN in 216 msec 2024-11-24T08:51:11,736 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:11,740 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=778da2ad8102f000e6527ce33247b88f, threads=4 2024-11-24T08:51:11,742 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/c248268c80f04d76a87c7fc4f85bc13b for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,742 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/fce9998727864933865500e725c4420d for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,742 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847 for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,742 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/d04aa409f1a54d419652ba9cd9039cf1 for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,754 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/fce9998727864933865500e725c4420d, top=true 2024-11-24T08:51:11,755 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/d04aa409f1a54d419652ba9cd9039cf1, top=true 2024-11-24T08:51:11,760 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/c248268c80f04d76a87c7fc4f85bc13b, top=true 2024-11-24T08:51:11,766 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1 for child: bfc44d74913e973cb7e0bc7d91d76b5e, parent: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,766 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/d04aa409f1a54d419652ba9cd9039cf1 for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,770 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d for child: bfc44d74913e973cb7e0bc7d91d76b5e, parent: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,770 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/fce9998727864933865500e725c4420d for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741848_1024 (size=27) 2024-11-24T08:51:11,772 INFO [StoreFileSplitter-pool-1 {}] 
regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b for child: bfc44d74913e973cb7e0bc7d91d76b5e, parent: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,772 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/c248268c80f04d76a87c7fc4f85bc13b for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:11,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741848_1024 (size=27) 2024-11-24T08:51:12,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741849_1025 (size=27) 2024-11-24T08:51:12,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741849_1025 (size=27) 2024-11-24T08:51:12,186 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847 for region: 778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:12,189 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 778da2ad8102f000e6527ce33247b88f Daughter A: [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f] storefiles, Daughter B: [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d] storefiles. 
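The StoreFileSplitter entries above also show why the split itself writes almost no data: parent store files that lie entirely above splitKey=row0062 are linked into daughter B only ("Will create HFileLink file … top=true"), while the one file that straddles the split point ends up referenced from both daughters. A rough sketch of that assignment decision; the file names and key ranges below are hypothetical, since the log does not print key ranges.

```java
// Illustrative only: decide which daughter(s) a parent store file belongs to at split time,
// based on its key range relative to the split key. Real HBase writes Reference/HFileLink
// metadata instead of copying data; file names and key ranges here are hypothetical.
import java.util.List;

public final class SplitAssignmentSketch {
    record StoreFile(String name, String firstKey, String lastKey) {}

    public static void main(String[] args) {
        String splitKey = "row0062";                                // the splitKey reported in this log
        List<StoreFile> files = List.of(
            new StoreFile("parent-file-1", "row0001", "row0092"),   // straddles the split point
            new StoreFile("parent-file-2", "row0093", "row0096"));  // entirely above it
        for (StoreFile f : files) {
            boolean daughterA = f.firstKey().compareTo(splitKey) < 0;   // has keys below splitKey
            boolean daughterB = f.lastKey().compareTo(splitKey) >= 0;   // has keys at/above splitKey
            System.out.printf("%s -> daughterA=%b daughterB=%b (top-only=%b)%n",
                f.name(), daughterA, daughterB, !daughterA && daughterB);
        }
    }
}
```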
2024-11-24T08:51:12,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741850_1026 (size=71) 2024-11-24T08:51:12,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741850_1026 (size=71) 2024-11-24T08:51:12,203 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:12,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741851_1027 (size=71) 2024-11-24T08:51:12,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741851_1027 (size=71) 2024-11-24T08:51:12,228 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:12,246 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-24T08:51:12,249 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-24T08:51:12,252 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732438272252"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732438272252"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732438272252"}]},"ts":"1732438272252"} 2024-11-24T08:51:12,253 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732438272252"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438272252"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732438272252"}]},"ts":"1732438272252"} 2024-11-24T08:51:12,253 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732438272252"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732438272252"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732438272252"}]},"ts":"1732438272252"} 2024-11-24T08:51:12,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74de31bc5c2e2d20fe1a281b677a200f, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=bfc44d74913e973cb7e0bc7d91d76b5e, ASSIGN}] 2024-11-24T08:51:12,276 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74de31bc5c2e2d20fe1a281b677a200f, ASSIGN 2024-11-24T08:51:12,277 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bfc44d74913e973cb7e0bc7d91d76b5e, ASSIGN 2024-11-24T08:51:12,277 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bfc44d74913e973cb7e0bc7d91d76b5e, ASSIGN; state=SPLITTING_NEW, location=469387a2cdb6,37059,1732438246097; forceNewPlan=false, retain=false 2024-11-24T08:51:12,277 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74de31bc5c2e2d20fe1a281b677a200f, ASSIGN; state=SPLITTING_NEW, location=469387a2cdb6,37059,1732438246097; forceNewPlan=false, retain=false 2024-11-24T08:51:12,428 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=74de31bc5c2e2d20fe1a281b677a200f, regionState=OPENING, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:12,428 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=bfc44d74913e973cb7e0bc7d91d76b5e, regionState=OPENING, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:12,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bfc44d74913e973cb7e0bc7d91d76b5e, ASSIGN because future has completed 2024-11-24T08:51:12,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure bfc44d74913e973cb7e0bc7d91d76b5e, server=469387a2cdb6,37059,1732438246097}] 2024-11-24T08:51:12,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74de31bc5c2e2d20fe1a281b677a200f, ASSIGN because future has completed 2024-11-24T08:51:12,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74de31bc5c2e2d20fe1a281b677a200f, server=469387a2cdb6,37059,1732438246097}] 2024-11-24T08:51:12,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:12,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:12,586 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 2024-11-24T08:51:12,586 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 74de31bc5c2e2d20fe1a281b677a200f, NAME => 'TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-24T08:51:12,587 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,587 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:51:12,587 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,587 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,588 INFO [StoreOpener-74de31bc5c2e2d20fe1a281b677a200f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,589 INFO [StoreOpener-74de31bc5c2e2d20fe1a281b677a200f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 74de31bc5c2e2d20fe1a281b677a200f columnFamilyName info 2024-11-24T08:51:12,589 DEBUG [StoreOpener-74de31bc5c2e2d20fe1a281b677a200f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:12,605 DEBUG [StoreOpener-74de31bc5c2e2d20fe1a281b677a200f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f->hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847-bottom 2024-11-24T08:51:12,605 INFO [StoreOpener-74de31bc5c2e2d20fe1a281b677a200f-1 {}] regionserver.HStore(327): Store=74de31bc5c2e2d20fe1a281b677a200f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:51:12,606 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,607 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,608 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,608 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,608 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,610 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,611 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 74de31bc5c2e2d20fe1a281b677a200f; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716942, jitterRate=-0.08836235105991364}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:51:12,611 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:12,612 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 74de31bc5c2e2d20fe1a281b677a200f: Running coprocessor pre-open hook at 1732438272587Writing region info on filesystem at 1732438272587Initializing all the Stores at 1732438272588 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438272588Cleaning up temporary data from old regions at 1732438272608 (+20 ms)Running coprocessor post-open hooks at 1732438272611 (+3 ms)Region opened successfully at 1732438272612 (+1 ms) 2024-11-24T08:51:12,613 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f., pid=14, masterSystemTime=1732438272583 2024-11-24T08:51:12,613 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 74de31bc5c2e2d20fe1a281b677a200f:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:12,613 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:12,613 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T08:51:12,615 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 2024-11-24T08:51:12,615 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): 74de31bc5c2e2d20fe1a281b677a200f/info is initiating minor compaction (all files) 2024-11-24T08:51:12,615 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 74de31bc5c2e2d20fe1a281b677a200f/info in TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 2024-11-24T08:51:12,615 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f->hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847-bottom] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/.tmp, totalSize=77.9 K 2024-11-24T08:51:12,616 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 2024-11-24T08:51:12,616 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 
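The two lease-recovery WARNs in this window ("Failed invocation … InvocationTargetException … Caused by: java.io.IOException: Filesystem closed") come from a reflective probe: isFileClosed(Path) is not part of the generic FileSystem API, so the utility looks it up by name, and a DFSClient that has already been shut down surfaces as an InvocationTargetException wrapping the real IOException. A hedged sketch of that probe pattern follows; it is an illustration, not the actual RecoverLeaseFSUtils code.

```java
// Sketch of the reflective probe behind the WARNs above: look up isFileClosed(Path) by name
// and treat any InvocationTargetException (here caused by "java.io.IOException: Filesystem
// closed") as "cannot tell / not closed". Illustrative pattern only, not the HBase utility.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {

    /** Best-effort check; returns false when the API is missing or the call fails. */
    static boolean isFileClosed(FileSystem fs, Path path) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
            return false;                  // no such method on this FileSystem implementation
        } catch (InvocationTargetException e) {
            // e.getCause() carries the real error, e.g. the closed-DFSClient IOException seen above
            return false;
        }
    }
}
```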
2024-11-24T08:51:12,616 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:12,616 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => bfc44d74913e973cb7e0bc7d91d76b5e, NAME => 'TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-24T08:51:12,616 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f, keycount=34, bloomtype=ROW, size=77.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732438257194 2024-11-24T08:51:12,616 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,616 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:51:12,617 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,617 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,617 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=74de31bc5c2e2d20fe1a281b677a200f, regionState=OPEN, openSeqNum=130, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:12,619 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-24T08:51:12,619 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-24T08:51:12,619 INFO [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-24T08:51:12,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74de31bc5c2e2d20fe1a281b677a200f, server=469387a2cdb6,37059,1732438246097 because future has completed 2024-11-24T08:51:12,620 INFO [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfc44d74913e973cb7e0bc7d91d76b5e columnFamilyName info 2024-11-24T08:51:12,620 DEBUG [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:12,631 DEBUG [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f->hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847-top 2024-11-24T08:51:12,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-11-24T08:51:12,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 74de31bc5c2e2d20fe1a281b677a200f, server=469387a2cdb6,37059,1732438246097 in 189 msec 2024-11-24T08:51:12,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74de31bc5c2e2d20fe1a281b677a200f, ASSIGN in 359 msec 2024-11-24T08:51:12,638 DEBUG [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b 2024-11-24T08:51:12,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/info/14f5aea808a44daa98f90c28251534c2 is 193, key is TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e./info:regioninfo/1732438272428/Put/seqid=0 2024-11-24T08:51:12,640 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 74de31bc5c2e2d20fe1a281b677a200f#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:12,641 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/.tmp/info/a51b099424ef497db347110019af34ac is 1080, key is row0001/info:/1732438257194/Put/seqid=0 2024-11-24T08:51:12,647 DEBUG [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1 2024-11-24T08:51:12,660 DEBUG [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d 2024-11-24T08:51:12,660 INFO [StoreOpener-bfc44d74913e973cb7e0bc7d91d76b5e-1 {}] regionserver.HStore(327): Store=bfc44d74913e973cb7e0bc7d91d76b5e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:51:12,660 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,661 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,662 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741852_1028 (size=9847) 2024-11-24T08:51:12,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741852_1028 (size=9847) 2024-11-24T08:51:12,663 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,663 DEBUG 
[RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/info/14f5aea808a44daa98f90c28251534c2 2024-11-24T08:51:12,665 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,666 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened bfc44d74913e973cb7e0bc7d91d76b5e; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708480, jitterRate=-0.09912188351154327}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:51:12,666 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:12,666 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for bfc44d74913e973cb7e0bc7d91d76b5e: Running coprocessor pre-open hook at 1732438272617Writing region info on filesystem at 1732438272617Initializing all the Stores at 1732438272619 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438272619Cleaning up temporary data from old regions at 1732438272663 (+44 ms)Running coprocessor post-open hooks at 1732438272666 (+3 ms)Region opened successfully at 1732438272666 2024-11-24T08:51:12,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741853_1029 (size=70862) 2024-11-24T08:51:12,667 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., pid=13, masterSystemTime=1732438272583 2024-11-24T08:51:12,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741853_1029 (size=70862) 2024-11-24T08:51:12,667 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 2 2024-11-24T08:51:12,667 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:12,667 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): 
Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-24T08:51:12,669 INFO [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:12,669 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:12,669 INFO [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:12,669 INFO [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f->hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847-top, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=120.8 K 2024-11-24T08:51:12,671 DEBUG [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:12,671 INFO [RS_OPEN_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 
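[editor's note] The region open journal logged a little earlier for bfc44d74913e973cb7e0bc7d91d76b5e reports epoch-millisecond checkpoints with "(+N ms)" annotations (e.g. "Initializing all the Stores at 1732438272619 (+2 ms)"). Those deltas are simply differences between consecutive checkpoints. A minimal, self-contained sketch that reproduces them is below; the class and variable names are mine for illustration and are not HBase code, only the labels and timestamps are copied from the journal above.

import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch: recomputes the "(+N ms)" deltas printed in the region-open
// journal above from its epoch-millisecond checkpoints. Names are illustrative
// only; this is not HBase code. Checkpoint values are copied from the log.
public class OpenJournalDeltas {
    public static void main(String[] args) {
        Map<String, Long> checkpoints = new LinkedHashMap<>();
        checkpoints.put("Running coprocessor pre-open hook",   1732438272617L);
        checkpoints.put("Initializing all the Stores",          1732438272619L);
        checkpoints.put("Cleaning up temporary data",           1732438272663L);
        checkpoints.put("Running coprocessor post-open hooks",  1732438272666L);

        Long previous = null;
        for (Map.Entry<String, Long> e : checkpoints.entrySet()) {
            long ts = e.getValue();
            String delta = (previous == null) ? "" : " (+" + (ts - previous) + " ms)";
            System.out.println(e.getKey() + " at " + ts + delta);
            previous = ts;
        }
        // Prints deltas of +2 ms, +44 ms and +3 ms, matching the journal entries above.
    }
}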
2024-11-24T08:51:12,671 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] compactions.Compactor(225): Compacting 84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f, keycount=34, bloomtype=ROW, size=77.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732438257194 2024-11-24T08:51:12,671 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=bfc44d74913e973cb7e0bc7d91d76b5e, regionState=OPEN, openSeqNum=130, regionLocation=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:12,673 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732438271425 2024-11-24T08:51:12,674 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732438271448 2024-11-24T08:51:12,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure bfc44d74913e973cb7e0bc7d91d76b5e, server=469387a2cdb6,37059,1732438246097 because future has completed 2024-11-24T08:51:12,677 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/.tmp/info/a51b099424ef497db347110019af34ac as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/info/a51b099424ef497db347110019af34ac 2024-11-24T08:51:12,677 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d, keycount=4, bloomtype=ROW, size=9.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732438271475 2024-11-24T08:51:12,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-24T08:51:12,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure bfc44d74913e973cb7e0bc7d91d76b5e, server=469387a2cdb6,37059,1732438246097 in 250 msec 2024-11-24T08:51:12,686 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 74de31bc5c2e2d20fe1a281b677a200f/info of 74de31bc5c2e2d20fe1a281b677a200f into a51b099424ef497db347110019af34ac(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
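[editor's note] As a quick cross-check of the compaction selection above: the four input HFiles are reported at 77.9 K, 17.5 K, 16.4 K and 9.1 K, and the selection reports totalSize=120.8 K. The sum of the rounded per-file sizes is 120.9 K, which agrees with the reported total only to rounding, since each per-file size is itself rounded to one decimal before printing. The tiny sketch below just performs that sum; all values are copied from the log lines above and nothing here is HBase API.

// Sums the four compaction input sizes reported above (values copied from the log).
public class CompactionInputSize {
    public static void main(String[] args) {
        double[] inputKiB = {77.9, 17.5, 16.4, 9.1}; // the 4 selected store files
        double total = 0;
        for (double k : inputKiB) {
            total += k;
        }
        // Prints "total input size = 120.9 K"; the log reports 120.8 K because the
        // per-file sizes are themselves rounded before being printed.
        System.out.printf("total input size = %.1f K%n", total);
    }
}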
2024-11-24T08:51:12,686 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 74de31bc5c2e2d20fe1a281b677a200f: 2024-11-24T08:51:12,686 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f., storeName=74de31bc5c2e2d20fe1a281b677a200f/info, priority=15, startTime=1732438272613; duration=0sec 2024-11-24T08:51:12,686 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:12,686 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 74de31bc5c2e2d20fe1a281b677a200f:info 2024-11-24T08:51:12,688 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7 2024-11-24T08:51:12,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/ns/a1c9af42e8a044faa68ee6ce63ad9cc3 is 43, key is default/ns:d/1732438247029/Put/seqid=0 2024-11-24T08:51:12,688 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bfc44d74913e973cb7e0bc7d91d76b5e, ASSIGN in 410 msec 2024-11-24T08:51:12,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=74de31bc5c2e2d20fe1a281b677a200f, daughterB=bfc44d74913e973cb7e0bc7d91d76b5e in 1.1910 sec 2024-11-24T08:51:12,690 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=bc9cc8490a391834b4f9a6e12d64adff, daughterB=1832ad52cf8d02ca8792d131f64219ae 2024-11-24T08:51:12,690 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=bc9cc8490a391834b4f9a6e12d64adff, daughterB=1832ad52cf8d02ca8792d131f64219ae 2024-11-24T08:51:12,690 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, daughterA=bc9cc8490a391834b4f9a6e12d64adff, daughterB=1832ad52cf8d02ca8792d131f64219ae 2024-11-24T08:51:12,691 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 778da2ad8102f000e6527ce33247b88f, NAME => 'TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-24T08:51:12,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=778da2ad8102f000e6527ce33247b88f, 
daughterA=bc9cc8490a391834b4f9a6e12d64adff, daughterB=1832ad52cf8d02ca8792d131f64219ae in 1.1860 sec 2024-11-24T08:51:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741854_1030 (size=5153) 2024-11-24T08:51:12,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741854_1030 (size=5153) 2024-11-24T08:51:12,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/ns/a1c9af42e8a044faa68ee6ce63ad9cc3 2024-11-24T08:51:12,711 INFO [RS:0;469387a2cdb6:37059-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:12,712 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/3aea9ff02ad84640a887b787f621c025 is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:12,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741855_1031 (size=43081) 2024-11-24T08:51:12,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741855_1031 (size=43081) 2024-11-24T08:51:12,733 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/3aea9ff02ad84640a887b787f621c025 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/3aea9ff02ad84640a887b787f621c025 2024-11-24T08:51:12,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/table/a81f45843e7649c9b8ca0431f61801d0 is 65, key is TestLogRolling-testLogRolling/table:state/1732438247513/Put/seqid=0 2024-11-24T08:51:12,740 INFO [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into 3aea9ff02ad84640a887b787f621c025(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
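[editor's note] The throttle lines above report each compaction's average throughput, how often it slept, and a total limit of 50.00 MB/second. As a rough illustration of that style of rate limiting, a writer can sleep whenever the bytes it has pushed get ahead of limit x elapsed time. The sketch below is a simplified stand-in built on that assumption; it is NOT HBase's PressureAwareThroughputController, and its names, structure, and use of 50 MB/s as the cap are assumptions taken from the log line, not from HBase source.

// Simplified throughput limiter, in the spirit of the "average throughput ...
// total limit is 50.00 MB/second" lines above. Not HBase code.
public class SimpleThroughputLimiter {
    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;
    private long totalSleptMs = 0;

    public SimpleThroughputLimiter(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    /** Account for 'bytes' just written and sleep if we are ahead of the budget. */
    public void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSec = bytesWritten / limitBytesPerSec;
        long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            totalSleptMs += sleepMs;
            Thread.sleep(sleepMs);
        }
    }

    public double averageMBPerSec() {
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        return bytesWritten / (1024.0 * 1024.0) / elapsedSec;
    }

    public static void main(String[] args) throws InterruptedException {
        // Cap at 50 MB/s, mirroring the limit reported in the log above.
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024);
        for (int i = 0; i < 100; i++) {
            limiter.control(1024 * 1024); // pretend we wrote 1 MB
        }
        System.out.printf("average throughput = %.2f MB/second, total slept %d ms%n",
            limiter.averageMBPerSec(), limiter.totalSleptMs);
    }
}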
2024-11-24T08:51:12,740 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:12,740 INFO [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=12, startTime=1732438272667; duration=0sec 2024-11-24T08:51:12,740 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:12,740 DEBUG [RS:0;469387a2cdb6:37059-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:12,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741856_1032 (size=5340) 2024-11-24T08:51:12,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741856_1032 (size=5340) 2024-11-24T08:51:12,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/table/a81f45843e7649c9b8ca0431f61801d0 2024-11-24T08:51:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/info/14f5aea808a44daa98f90c28251534c2 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/info/14f5aea808a44daa98f90c28251534c2 2024-11-24T08:51:12,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/info/14f5aea808a44daa98f90c28251534c2, entries=30, sequenceid=17, filesize=9.6 K 2024-11-24T08:51:12,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/ns/a1c9af42e8a044faa68ee6ce63ad9cc3 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/ns/a1c9af42e8a044faa68ee6ce63ad9cc3 2024-11-24T08:51:12,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/ns/a1c9af42e8a044faa68ee6ce63ad9cc3, entries=2, sequenceid=17, filesize=5.0 K 2024-11-24T08:51:12,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/table/a81f45843e7649c9b8ca0431f61801d0 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/table/a81f45843e7649c9b8ca0431f61801d0 2024-11-24T08:51:12,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/table/a81f45843e7649c9b8ca0431f61801d0, entries=2, sequenceid=17, filesize=5.2 K 2024-11-24T08:51:12,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 152ms, sequenceid=17, compaction requested=false 2024-11-24T08:51:12,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T08:51:13,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:13,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:13,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37692 deadline: 1732438283484, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. is not online on 469387a2cdb6,37059,1732438246097 2024-11-24T08:51:13,485 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. 
is not online on 469387a2cdb6,37059,1732438246097 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:51:13,486 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f. is not online on 469387a2cdb6,37059,1732438246097 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:51:13,486 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732438247140.778da2ad8102f000e6527ce33247b88f., hostname=469387a2cdb6,37059,1732438246097, seqNum=2 from cache 2024-11-24T08:51:14,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:14,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:15,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:15,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:15,983 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:51:16,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:16,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:16,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:16,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,262 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:51:17,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:51:17,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:17,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:18,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:18,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:19,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:19,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:20,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:20,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:21,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:21,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:22,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:22,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:23,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:23,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:23,580 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., hostname=469387a2cdb6,37059,1732438246097, seqNum=130] 2024-11-24T08:51:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:23,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:51:23,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/bf2aec0fb6c8482ea2d261bd55082cf8 is 1080, key is row0097/info:/1732438283581/Put/seqid=0 2024-11-24T08:51:23,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741857_1033 (size=12516) 2024-11-24T08:51:23,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741857_1033 (size=12516) 2024-11-24T08:51:23,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/bf2aec0fb6c8482ea2d261bd55082cf8 2024-11-24T08:51:23,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/bf2aec0fb6c8482ea2d261bd55082cf8 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/bf2aec0fb6c8482ea2d261bd55082cf8 2024-11-24T08:51:23,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/bf2aec0fb6c8482ea2d261bd55082cf8, entries=7, sequenceid=140, filesize=12.2 K 2024-11-24T08:51:23,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for bfc44d74913e973cb7e0bc7d91d76b5e in 27ms, sequenceid=140, compaction requested=false 2024-11-24T08:51:23,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 
2024-11-24T08:51:23,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:23,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T08:51:23,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/d7780ba5dbb846328ca85f6778cc84f1 is 1080, key is row0104/info:/1732438283593/Put/seqid=0 2024-11-24T08:51:23,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741858_1034 (size=21156) 2024-11-24T08:51:23,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741858_1034 (size=21156) 2024-11-24T08:51:23,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/d7780ba5dbb846328ca85f6778cc84f1 2024-11-24T08:51:23,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/d7780ba5dbb846328ca85f6778cc84f1 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d7780ba5dbb846328ca85f6778cc84f1 2024-11-24T08:51:23,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d7780ba5dbb846328ca85f6778cc84f1, entries=15, sequenceid=158, filesize=20.7 K 2024-11-24T08:51:23,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for bfc44d74913e973cb7e0bc7d91d76b5e in 23ms, sequenceid=158, compaction requested=true 2024-11-24T08:51:23,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:23,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:23,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:23,643 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:23,644 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76753 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-24T08:51:23,645 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:23,645 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:23,645 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/3aea9ff02ad84640a887b787f621c025, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/bf2aec0fb6c8482ea2d261bd55082cf8, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d7780ba5dbb846328ca85f6778cc84f1] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=75.0 K 2024-11-24T08:51:23,645 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3aea9ff02ad84640a887b787f621c025, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732438269402 2024-11-24T08:51:23,645 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf2aec0fb6c8482ea2d261bd55082cf8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732438283581 2024-11-24T08:51:23,646 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7780ba5dbb846328ca85f6778cc84f1, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732438283593 2024-11-24T08:51:23,660 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#72 average throughput is 29.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:23,660 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/25b132620d7941cdb79956de896e4e3d is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:23,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741859_1035 (size=66967) 2024-11-24T08:51:23,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741859_1035 (size=66967) 2024-11-24T08:51:23,670 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/25b132620d7941cdb79956de896e4e3d as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/25b132620d7941cdb79956de896e4e3d 2024-11-24T08:51:23,675 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into 25b132620d7941cdb79956de896e4e3d(size=65.4 K), total size for store is 65.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:23,675 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:23,675 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=13, startTime=1732438283643; duration=0sec 2024-11-24T08:51:23,675 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:23,675 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:24,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:24,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:25,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:25,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:25,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T08:51:25,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/8852c8f897704eecb340e87dc6d35c49 is 1080, key is row0119/info:/1732438283622/Put/seqid=0 2024-11-24T08:51:25,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741860_1036 (size=16828) 2024-11-24T08:51:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741860_1036 (size=16828) 2024-11-24T08:51:25,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/8852c8f897704eecb340e87dc6d35c49 2024-11-24T08:51:25,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/8852c8f897704eecb340e87dc6d35c49 as 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8852c8f897704eecb340e87dc6d35c49 2024-11-24T08:51:25,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8852c8f897704eecb340e87dc6d35c49, entries=11, sequenceid=173, filesize=16.4 K 2024-11-24T08:51:25,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for bfc44d74913e973cb7e0bc7d91d76b5e in 26ms, sequenceid=173, compaction requested=false 2024-11-24T08:51:25,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:25,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T08:51:25,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/c058270569c24ef29a926b629522f81e is 1080, key is row0130/info:/1732438285647/Put/seqid=0 2024-11-24T08:51:25,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741861_1037 (size=17906) 2024-11-24T08:51:25,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741861_1037 (size=17906) 2024-11-24T08:51:25,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bfc44d74913e973cb7e0bc7d91d76b5e, server=469387a2cdb6,37059,1732438246097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T08:51:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37692 deadline: 1732438295704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bfc44d74913e973cb7e0bc7d91d76b5e, server=469387a2cdb6,37059,1732438246097 2024-11-24T08:51:25,705 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., hostname=469387a2cdb6,37059,1732438246097, seqNum=130 , the old value is region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., hostname=469387a2cdb6,37059,1732438246097, seqNum=130, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bfc44d74913e973cb7e0bc7d91d76b5e, server=469387a2cdb6,37059,1732438246097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:51:25,705 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., hostname=469387a2cdb6,37059,1732438246097, seqNum=130 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bfc44d74913e973cb7e0bc7d91d76b5e, server=469387a2cdb6,37059,1732438246097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:51:25,705 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., hostname=469387a2cdb6,37059,1732438246097, seqNum=130 because the exception is null or not the one we care about 2024-11-24T08:51:26,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/c058270569c24ef29a926b629522f81e 2024-11-24T08:51:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/c058270569c24ef29a926b629522f81e as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/c058270569c24ef29a926b629522f81e 2024-11-24T08:51:26,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/c058270569c24ef29a926b629522f81e, entries=12, sequenceid=188, filesize=17.5 K 2024-11-24T08:51:26,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for bfc44d74913e973cb7e0bc7d91d76b5e in 431ms, sequenceid=188, compaction requested=true 2024-11-24T08:51:26,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:26,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:26,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:26,105 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:26,107 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101701 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:51:26,107 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:26,107 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:26,107 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/25b132620d7941cdb79956de896e4e3d, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8852c8f897704eecb340e87dc6d35c49, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/c058270569c24ef29a926b629522f81e] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=99.3 K 2024-11-24T08:51:26,107 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 25b132620d7941cdb79956de896e4e3d, keycount=57, bloomtype=ROW, size=65.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732438269402 2024-11-24T08:51:26,108 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8852c8f897704eecb340e87dc6d35c49, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732438283622 2024-11-24T08:51:26,108 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting c058270569c24ef29a926b629522f81e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732438285647 2024-11-24T08:51:26,123 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#75 average throughput is 41.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:26,123 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/354025f7c45a46b7848eb2585c479790 is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:26,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741862_1038 (size=91940) 2024-11-24T08:51:26,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741862_1038 (size=91940) 2024-11-24T08:51:26,132 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/354025f7c45a46b7848eb2585c479790 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/354025f7c45a46b7848eb2585c479790 2024-11-24T08:51:26,137 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into 354025f7c45a46b7848eb2585c479790(size=89.8 K), total size for store is 89.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:26,137 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:26,137 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=13, startTime=1732438286105; duration=0sec 2024-11-24T08:51:26,138 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:26,138 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:26,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:26,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:27,161 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T08:51:27,161 INFO [master/469387a2cdb6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T08:51:27,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:27,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:28,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:28,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:29,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:29,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:30,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:30,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:31,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:31,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:32,002 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-24T08:51:32,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:32,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:33,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:33,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:34,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:34,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:35,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:35,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T08:51:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e
2024-11-24T08:51:35,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-11-24T08:51:35,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/1b05fda6356e4ed6b2562e0564e5539f is 1080, key is row0142/info:/1732438285676/Put/seqid=0
2024-11-24T08:51:35,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741863_1039 (size=24394)
2024-11-24T08:51:35,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741863_1039 (size=24394)
2024-11-24T08:51:35,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/1b05fda6356e4ed6b2562e0564e5539f
2024-11-24T08:51:35,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/1b05fda6356e4ed6b2562e0564e5539f as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1b05fda6356e4ed6b2562e0564e5539f
2024-11-24T08:51:35,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1b05fda6356e4ed6b2562e0564e5539f, entries=18, sequenceid=210, filesize=23.8 K
2024-11-24T08:51:35,838 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for bfc44d74913e973cb7e0bc7d91d76b5e in 29ms, sequenceid=210, compaction requested=false
2024-11-24T08:51:35,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e:
2024-11-24T08:51:36,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta
java.lang.reflect.InvocationTargetException: null at
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:36,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:37,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:37,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:37,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:51:37,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/e931c5b53d4f4de28cef2bf98c81d164 is 1080, key is row0160/info:/1732438295811/Put/seqid=0 2024-11-24T08:51:37,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741864_1040 (size=12516) 2024-11-24T08:51:37,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741864_1040 (size=12516) 2024-11-24T08:51:37,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/e931c5b53d4f4de28cef2bf98c81d164 2024-11-24T08:51:37,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/e931c5b53d4f4de28cef2bf98c81d164 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/e931c5b53d4f4de28cef2bf98c81d164 2024-11-24T08:51:37,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/e931c5b53d4f4de28cef2bf98c81d164, entries=7, sequenceid=220, filesize=12.2 K 2024-11-24T08:51:37,853 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for bfc44d74913e973cb7e0bc7d91d76b5e in 23ms, sequenceid=220, compaction requested=true 2024-11-24T08:51:37,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:37,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:37,853 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:37,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:37,853 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T08:51:37,854 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128850 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:51:37,854 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:37,854 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:37,855 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/354025f7c45a46b7848eb2585c479790, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1b05fda6356e4ed6b2562e0564e5539f, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/e931c5b53d4f4de28cef2bf98c81d164] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=125.8 K 2024-11-24T08:51:37,855 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 354025f7c45a46b7848eb2585c479790, keycount=80, bloomtype=ROW, size=89.8 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732438269402 2024-11-24T08:51:37,855 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b05fda6356e4ed6b2562e0564e5539f, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732438285676 2024-11-24T08:51:37,856 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting e931c5b53d4f4de28cef2bf98c81d164, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732438295811 2024-11-24T08:51:37,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/1a311ebd95974e538350b81251381994 is 1080, key is row0167/info:/1732438297832/Put/seqid=0 2024-11-24T08:51:37,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741865_1041 (size=16828) 2024-11-24T08:51:37,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741865_1041 (size=16828) 2024-11-24T08:51:37,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=234 (bloomFilter=true), 
to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/1a311ebd95974e538350b81251381994 2024-11-24T08:51:37,870 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#79 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:37,870 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f80b33389efd45af82144c3060abede7 is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:37,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/1a311ebd95974e538350b81251381994 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1a311ebd95974e538350b81251381994 2024-11-24T08:51:37,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1a311ebd95974e538350b81251381994, entries=11, sequenceid=234, filesize=16.4 K 2024-11-24T08:51:37,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for bfc44d74913e973cb7e0bc7d91d76b5e in 24ms, sequenceid=234, compaction requested=false 2024-11-24T08:51:37,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:37,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-24T08:51:37,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741866_1042 (size=118996) 2024-11-24T08:51:37,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741866_1042 (size=118996) 2024-11-24T08:51:37,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f9b2d84d88a544dda681c8ef2b512e4a is 1080, key is row0178/info:/1732438297854/Put/seqid=0 2024-11-24T08:51:37,887 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f80b33389efd45af82144c3060abede7 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f80b33389efd45af82144c3060abede7 2024-11-24T08:51:37,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741867_1043 (size=15750) 2024-11-24T08:51:37,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741867_1043 (size=15750) 2024-11-24T08:51:37,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f9b2d84d88a544dda681c8ef2b512e4a 2024-11-24T08:51:37,894 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into f80b33389efd45af82144c3060abede7(size=116.2 K), total size for store is 132.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:37,894 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:37,894 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=13, startTime=1732438297853; duration=0sec 2024-11-24T08:51:37,894 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:37,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f9b2d84d88a544dda681c8ef2b512e4a as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f9b2d84d88a544dda681c8ef2b512e4a 2024-11-24T08:51:37,894 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:37,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f9b2d84d88a544dda681c8ef2b512e4a, entries=10, sequenceid=247, filesize=15.4 K 2024-11-24T08:51:37,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=5.25 KB/5380 for bfc44d74913e973cb7e0bc7d91d76b5e in 22ms, sequenceid=247, compaction requested=true 2024-11-24T08:51:37,901 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:37,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:37,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:37,901 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:37,902 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 151574 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:51:37,902 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:37,902 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:37,902 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f80b33389efd45af82144c3060abede7, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1a311ebd95974e538350b81251381994, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f9b2d84d88a544dda681c8ef2b512e4a] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=148.0 K 2024-11-24T08:51:37,903 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting f80b33389efd45af82144c3060abede7, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732438269402 2024-11-24T08:51:37,903 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a311ebd95974e538350b81251381994, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732438297832 2024-11-24T08:51:37,904 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting f9b2d84d88a544dda681c8ef2b512e4a, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732438297854 2024-11-24T08:51:37,915 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#81 average throughput is 43.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:37,916 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/9b862f55c20b4235a4de0dd0885daf41 is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:37,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741868_1044 (size=141941) 2024-11-24T08:51:37,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741868_1044 (size=141941) 2024-11-24T08:51:37,924 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/9b862f55c20b4235a4de0dd0885daf41 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/9b862f55c20b4235a4de0dd0885daf41 2024-11-24T08:51:37,929 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into 9b862f55c20b4235a4de0dd0885daf41(size=138.6 K), total size for store is 138.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:37,929 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:37,929 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=13, startTime=1732438297901; duration=0sec 2024-11-24T08:51:37,929 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:37,929 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:38,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:38,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:39,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:39,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:39,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:39,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:51:39,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f6c5a49dc0594fe9a31cf500c77e72e7 is 1080, key is row0188/info:/1732438297879/Put/seqid=0 2024-11-24T08:51:39,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741869_1045 (size=12518) 2024-11-24T08:51:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741869_1045 (size=12518) 2024-11-24T08:51:39,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f6c5a49dc0594fe9a31cf500c77e72e7 2024-11-24T08:51:39,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/f6c5a49dc0594fe9a31cf500c77e72e7 as 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f6c5a49dc0594fe9a31cf500c77e72e7 2024-11-24T08:51:39,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f6c5a49dc0594fe9a31cf500c77e72e7, entries=7, sequenceid=259, filesize=12.2 K 2024-11-24T08:51:39,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for bfc44d74913e973cb7e0bc7d91d76b5e in 35ms, sequenceid=259, compaction requested=false 2024-11-24T08:51:39,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:39,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:39,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-24T08:51:39,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/5be248d08eb6490b9f57f3d338724036 is 1080, key is row0195/info:/1732438299895/Put/seqid=0 2024-11-24T08:51:39,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741870_1046 (size=22254) 2024-11-24T08:51:39,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741870_1046 (size=22254) 2024-11-24T08:51:39,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/5be248d08eb6490b9f57f3d338724036 2024-11-24T08:51:39,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/5be248d08eb6490b9f57f3d338724036 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/5be248d08eb6490b9f57f3d338724036 2024-11-24T08:51:39,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/5be248d08eb6490b9f57f3d338724036, entries=16, sequenceid=278, filesize=21.7 K 2024-11-24T08:51:39,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for bfc44d74913e973cb7e0bc7d91d76b5e in 26ms, sequenceid=278, compaction requested=true 2024-11-24T08:51:39,958 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:39,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:39,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:39,958 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:39,960 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 176713 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:51:39,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T08:51:39,960 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:39,960 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:39,960 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/9b862f55c20b4235a4de0dd0885daf41, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f6c5a49dc0594fe9a31cf500c77e72e7, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/5be248d08eb6490b9f57f3d338724036] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=172.6 K 2024-11-24T08:51:39,961 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b862f55c20b4235a4de0dd0885daf41, keycount=126, bloomtype=ROW, size=138.6 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732438269402 2024-11-24T08:51:39,962 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6c5a49dc0594fe9a31cf500c77e72e7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732438297879 2024-11-24T08:51:39,963 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5be248d08eb6490b9f57f3d338724036, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732438299895 2024-11-24T08:51:39,967 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/fa12a0fa3dc1441186a935b011d8bee2 is 1080, key is row0211/info:/1732438299934/Put/seqid=0 2024-11-24T08:51:39,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741871_1047 (size=17918) 2024-11-24T08:51:39,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741871_1047 (size=17918) 2024-11-24T08:51:39,980 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#85 average throughput is 38.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:39,980 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/22eaecfe597c4597a7ee0991d1117ea1 is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:39,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/fa12a0fa3dc1441186a935b011d8bee2 2024-11-24T08:51:39,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/fa12a0fa3dc1441186a935b011d8bee2 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/fa12a0fa3dc1441186a935b011d8bee2 2024-11-24T08:51:39,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741872_1048 (size=166859) 2024-11-24T08:51:39,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741872_1048 (size=166859) 2024-11-24T08:51:39,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/fa12a0fa3dc1441186a935b011d8bee2, entries=12, sequenceid=293, filesize=17.5 K 2024-11-24T08:51:39,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for bfc44d74913e973cb7e0bc7d91d76b5e in 37ms, sequenceid=293, compaction requested=false 2024-11-24T08:51:39,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:39,999 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/22eaecfe597c4597a7ee0991d1117ea1 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/22eaecfe597c4597a7ee0991d1117ea1 2024-11-24T08:51:40,007 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into 22eaecfe597c4597a7ee0991d1117ea1(size=162.9 K), total size for store is 180.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:40,007 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:40,007 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=13, startTime=1732438299958; duration=0sec 2024-11-24T08:51:40,008 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:40,008 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:40,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:40,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:41,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:41,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:41,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:51:41,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/35060008655241c7b36d1ae1a7634e91 is 1080, key is row0223/info:/1732438299961/Put/seqid=0 2024-11-24T08:51:41,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741873_1049 (size=12523) 2024-11-24T08:51:41,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741873_1049 (size=12523) 2024-11-24T08:51:41,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/35060008655241c7b36d1ae1a7634e91 2024-11-24T08:51:42,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/35060008655241c7b36d1ae1a7634e91 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/35060008655241c7b36d1ae1a7634e91 2024-11-24T08:51:42,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/35060008655241c7b36d1ae1a7634e91, entries=7, sequenceid=304, filesize=12.2 K 2024-11-24T08:51:42,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for bfc44d74913e973cb7e0bc7d91d76b5e in 22ms, sequenceid=304, compaction requested=true 2024-11-24T08:51:42,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:42,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:42,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:42,010 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:42,011 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 197300 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:51:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:42,011 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:42,011 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:42,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T08:51:42,011 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/22eaecfe597c4597a7ee0991d1117ea1, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/fa12a0fa3dc1441186a935b011d8bee2, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/35060008655241c7b36d1ae1a7634e91] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=192.7 K 2024-11-24T08:51:42,011 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 22eaecfe597c4597a7ee0991d1117ea1, keycount=149, bloomtype=ROW, size=162.9 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732438269402 2024-11-24T08:51:42,012 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting fa12a0fa3dc1441186a935b011d8bee2, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732438299934 2024-11-24T08:51:42,012 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting 35060008655241c7b36d1ae1a7634e91, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732438299961 2024-11-24T08:51:42,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/edb4aeb1f0a2454db285ce25acf0462e is 1080, key is row0230/info:/1732438301988/Put/seqid=0 2024-11-24T08:51:42,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to 
blk_1073741874_1050 (size=19013) 2024-11-24T08:51:42,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741874_1050 (size=19013) 2024-11-24T08:51:42,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/edb4aeb1f0a2454db285ce25acf0462e 2024-11-24T08:51:42,025 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#88 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:42,026 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/d6a6eeb2d7e342b793dfb40c1514306f is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:42,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/edb4aeb1f0a2454db285ce25acf0462e as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/edb4aeb1f0a2454db285ce25acf0462e 2024-11-24T08:51:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741875_1051 (size=187470) 2024-11-24T08:51:42,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741875_1051 (size=187470) 2024-11-24T08:51:42,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/edb4aeb1f0a2454db285ce25acf0462e, entries=13, sequenceid=320, filesize=18.6 K 2024-11-24T08:51:42,035 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/d6a6eeb2d7e342b793dfb40c1514306f as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d6a6eeb2d7e342b793dfb40c1514306f 2024-11-24T08:51:42,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for bfc44d74913e973cb7e0bc7d91d76b5e in 24ms, sequenceid=320, compaction requested=false 2024-11-24T08:51:42,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37059 {}] regionserver.HRegion(8855): 
Flush requested on bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:42,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T08:51:42,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/ed85f7b860cb4b7585de569c7091c4aa is 1080, key is row0243/info:/1732438302012/Put/seqid=0 2024-11-24T08:51:42,042 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into d6a6eeb2d7e342b793dfb40c1514306f(size=183.1 K), total size for store is 201.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:42,042 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:42,042 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=13, startTime=1732438302009; duration=0sec 2024-11-24T08:51:42,042 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:42,042 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:42,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741876_1052 (size=16839) 2024-11-24T08:51:42,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741876_1052 (size=16839) 2024-11-24T08:51:42,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/ed85f7b860cb4b7585de569c7091c4aa 2024-11-24T08:51:42,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/ed85f7b860cb4b7585de569c7091c4aa as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/ed85f7b860cb4b7585de569c7091c4aa 2024-11-24T08:51:42,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/ed85f7b860cb4b7585de569c7091c4aa, entries=11, sequenceid=334, filesize=16.4 K 2024-11-24T08:51:42,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush 
of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=3.15 KB/3228 for bfc44d74913e973cb7e0bc7d91d76b5e in 19ms, sequenceid=334, compaction requested=true 2024-11-24T08:51:42,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:42,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfc44d74913e973cb7e0bc7d91d76b5e:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:51:42,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:42,056 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:51:42,057 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 223322 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:51:42,057 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1541): bfc44d74913e973cb7e0bc7d91d76b5e/info is initiating minor compaction (all files) 2024-11-24T08:51:42,057 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfc44d74913e973cb7e0bc7d91d76b5e/info in TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:42,057 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d6a6eeb2d7e342b793dfb40c1514306f, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/edb4aeb1f0a2454db285ce25acf0462e, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/ed85f7b860cb4b7585de569c7091c4aa] into tmpdir=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp, totalSize=218.1 K 2024-11-24T08:51:42,058 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6a6eeb2d7e342b793dfb40c1514306f, keycount=168, bloomtype=ROW, size=183.1 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732438269402 2024-11-24T08:51:42,058 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting edb4aeb1f0a2454db285ce25acf0462e, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732438301988 2024-11-24T08:51:42,058 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed85f7b860cb4b7585de569c7091c4aa, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732438302012 2024-11-24T08:51:42,072 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfc44d74913e973cb7e0bc7d91d76b5e#info#compaction#90 average throughput is 
39.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:51:42,073 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/4d6093ec93ad4efaadfcd51673c4ffcd is 1080, key is row0062/info:/1732438269402/Put/seqid=0 2024-11-24T08:51:42,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741877_1053 (size=213541) 2024-11-24T08:51:42,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741877_1053 (size=213541) 2024-11-24T08:51:42,080 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/4d6093ec93ad4efaadfcd51673c4ffcd as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/4d6093ec93ad4efaadfcd51673c4ffcd 2024-11-24T08:51:42,086 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfc44d74913e973cb7e0bc7d91d76b5e/info of bfc44d74913e973cb7e0bc7d91d76b5e into 4d6093ec93ad4efaadfcd51673c4ffcd(size=208.5 K), total size for store is 208.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:51:42,086 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:42,086 INFO [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., storeName=bfc44d74913e973cb7e0bc7d91d76b5e/info, priority=13, startTime=1732438302056; duration=0sec 2024-11-24T08:51:42,086 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:51:42,086 DEBUG [RS:0;469387a2cdb6:37059-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfc44d74913e973cb7e0bc7d91d76b5e:info 2024-11-24T08:51:42,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:42,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:43,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:43,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:44,043 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-24T08:51:44,044 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C37059%2C1732438246097.1732438304043 2024-11-24T08:51:44,051 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,051 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,051 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,051 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,051 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,051 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438246625 with entries=317, filesize=310.19 KB; new WAL /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438304043 2024-11-24T08:51:44,052 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46531:46531),(127.0.0.1/127.0.0.1:43467:43467)] 2024-11-24T08:51:44,053 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438246625 is not closed yet, will try archiving it next time 2024-11-24T08:51:44,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741833_1009 (size=317642) 2024-11-24T08:51:44,053 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741833_1009 (size=317642) 2024-11-24T08:51:44,056 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 74de31bc5c2e2d20fe1a281b677a200f: 2024-11-24T08:51:44,056 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing bfc44d74913e973cb7e0bc7d91d76b5e 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-24T08:51:44,061 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/8ad036449f58430cb00eaa1edf12cddd is 1080, key is row0254/info:/1732438302038/Put/seqid=0 2024-11-24T08:51:44,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741879_1055 (size=8199) 2024-11-24T08:51:44,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741879_1055 (size=8199) 2024-11-24T08:51:44,065 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/8ad036449f58430cb00eaa1edf12cddd 2024-11-24T08:51:44,070 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/.tmp/info/8ad036449f58430cb00eaa1edf12cddd as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8ad036449f58430cb00eaa1edf12cddd 2024-11-24T08:51:44,074 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8ad036449f58430cb00eaa1edf12cddd, entries=3, sequenceid=342, filesize=8.0 K 2024-11-24T08:51:44,075 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for bfc44d74913e973cb7e0bc7d91d76b5e in 19ms, sequenceid=342, compaction requested=false 2024-11-24T08:51:44,075 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for bfc44d74913e973cb7e0bc7d91d76b5e: 2024-11-24T08:51:44,075 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-24T08:51:44,079 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/info/9ae7b48e722a4e0587c499783bdb76cb is 193, key is TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e./info:regioninfo/1732438272671/Put/seqid=0 2024-11-24T08:51:44,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741880_1056 (size=6223) 2024-11-24T08:51:44,083 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741880_1056 (size=6223) 2024-11-24T08:51:44,084 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/info/9ae7b48e722a4e0587c499783bdb76cb 2024-11-24T08:51:44,088 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/.tmp/info/9ae7b48e722a4e0587c499783bdb76cb as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/info/9ae7b48e722a4e0587c499783bdb76cb 2024-11-24T08:51:44,093 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/info/9ae7b48e722a4e0587c499783bdb76cb, entries=5, sequenceid=21, filesize=6.1 K 2024-11-24T08:51:44,094 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-11-24T08:51:44,094 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T08:51:44,094 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C37059%2C1732438246097.1732438304094 2024-11-24T08:51:44,099 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,099 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,099 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,099 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438304043 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438304094 2024-11-24T08:51:44,100 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46531:46531),(127.0.0.1/127.0.0.1:43467:43467)] 2024-11-24T08:51:44,100 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438304043 is not closed yet, will try archiving it next time 2024-11-24T08:51:44,100 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438246625 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/oldWALs/469387a2cdb6%2C37059%2C1732438246097.1732438246625 2024-11-24T08:51:44,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741878_1054 (size=731) 2024-11-24T08:51:44,101 
INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:51:44,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741878_1054 (size=731) 2024-11-24T08:51:44,101 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/WALs/469387a2cdb6,37059,1732438246097/469387a2cdb6%2C37059%2C1732438246097.1732438304043 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/oldWALs/469387a2cdb6%2C37059%2C1732438246097.1732438304043 2024-11-24T08:51:44,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:51:44,202 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:51:44,202 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:51:44,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:44,203 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:44,203 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:51:44,203 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:51:44,203 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1010273218, stopped=false 2024-11-24T08:51:44,204 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=469387a2cdb6,34461,1732438246017 2024-11-24T08:51:44,206 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:51:44,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:51:44,206 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:44,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:44,206 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:51:44,206 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
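The call stacks above show where this shutdown sequence originates: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared AsyncConnection, asks the master to shut the cluster down, and (as the ZKWatcher records on this page show) deletes the /hbase/running znode that the region server is watching. A minimal sketch of that tearDown, assuming a test class wired to the same shared HBaseTestingUtil; the class and field names other than shutdownMiniCluster are illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTearDownSketch {
      // Assumed to be the same shared utility that started the minicluster.
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared cluster connection, requests master shutdown
        // (the master then removes /hbase/running, which the region server
        // watches), and waits for the mini HBase/DFS/ZK processes to stop.
        TEST_UTIL.shutdownMiniCluster();
      }
    }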
2024-11-24T08:51:44,206 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:51:44,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:44,207 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:51:44,207 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,37059,1732438246097' ***** 2024-11-24T08:51:44,207 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:51:44,207 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:51:44,207 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:51:44,208 INFO [RS:0;469387a2cdb6:37059 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:51:44,208 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:51:44,208 INFO [RS:0;469387a2cdb6:37059 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:51:44,208 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(3091): Received CLOSE for 74de31bc5c2e2d20fe1a281b677a200f 2024-11-24T08:51:44,208 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(3091): Received CLOSE for bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:44,208 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 74de31bc5c2e2d20fe1a281b677a200f, disabling compactions & flushes 2024-11-24T08:51:44,208 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,37059,1732438246097 2024-11-24T08:51:44,208 INFO [RS:0;469387a2cdb6:37059 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:51:44,208 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 2024-11-24T08:51:44,208 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 2024-11-24T08:51:44,208 INFO [RS:0;469387a2cdb6:37059 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;469387a2cdb6:37059. 2024-11-24T08:51:44,208 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. after waiting 0 ms 2024-11-24T08:51:44,209 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 
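The close journal entries that follow ("Closing ... disabling compactions & flushes", "Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled for region ...") trace a lock-then-drain pattern: take the region's close lock, stop new writes, flush what remains, then record the region as closed. A simplified illustration of that sequence, assuming a plain ReentrantReadWriteLock; this is not the actual HRegion code:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class RegionCloseSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
      private volatile boolean writesDisabled;
      private volatile boolean closed;

      void close() {
        // "Time limited wait for close lock" / "Acquired close lock ... after waiting N ms"
        closeLock.writeLock().lock();
        try {
          writesDisabled = true;   // "Updates disabled for region ..."
          flushRemainingEdits();   // final flush, then the close marker goes to the WAL
          closed = true;           // "Closed <region>" plus the close journal record
        } finally {
          closeLock.writeLock().unlock();
        }
      }

      private void flushRemainingEdits() {
        // placeholder for the memstore flush performed while closing
      }
    }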
2024-11-24T08:51:44,209 DEBUG [RS:0;469387a2cdb6:37059 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:51:44,209 DEBUG [RS:0;469387a2cdb6:37059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:44,209 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:51:44,209 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:51:44,209 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
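The three "Waiting for ... Thread to finish..." records above come from CompactSplit draining its split and compaction executors while the region server stops. A minimal sketch of that drain pattern with standard java.util.concurrent, assuming one pool per queue as named in the log (splits, long and short compactions); it is not the exact CompactSplit implementation:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    class CompactSplitDrainSketch {
      private final ExecutorService splits = Executors.newFixedThreadPool(1);
      private final ExecutorService longCompactions = Executors.newFixedThreadPool(1);
      private final ExecutorService shortCompactions = Executors.newFixedThreadPool(1);

      void waitForThreadsToFinish() throws InterruptedException {
        for (ExecutorService pool :
            new ExecutorService[] { splits, longCompactions, shortCompactions }) {
          pool.shutdown();                                    // stop accepting new work
          if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow();                               // interrupt anything still running
          }
        }
      }
    }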
2024-11-24T08:51:44,209 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:51:44,209 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-24T08:51:44,209 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1325): Online Regions={74de31bc5c2e2d20fe1a281b677a200f=TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f., bfc44d74913e973cb7e0bc7d91d76b5e=TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:51:44,210 DEBUG [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 74de31bc5c2e2d20fe1a281b677a200f, bfc44d74913e973cb7e0bc7d91d76b5e 2024-11-24T08:51:44,209 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f->hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847-bottom] to archive 2024-11-24T08:51:44,210 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:51:44,210 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:51:44,210 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:51:44,210 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:51:44,210 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:51:44,211 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T08:51:44,213 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:44,214 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=469387a2cdb6:34461 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T08:51:44,214 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-24T08:51:44,216 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-24T08:51:44,217 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:51:44,217 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:51:44,217 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438304209Running coprocessor pre-close hooks at 1732438304209Disabling compacts and flushes for region at 1732438304210 (+1 ms)Disabling writes for close at 1732438304210Writing region close event to WAL at 1732438304212 (+2 ms)Running coprocessor post-close hooks at 1732438304217 (+5 ms)Closed at 1732438304217 2024-11-24T08:51:44,217 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:51:44,218 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/74de31bc5c2e2d20fe1a281b677a200f/recovered.edits/134.seqid, newMaxSeqId=134, maxSeqId=129 2024-11-24T08:51:44,218 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 2024-11-24T08:51:44,218 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 74de31bc5c2e2d20fe1a281b677a200f: Waiting for close lock at 1732438304208Running coprocessor pre-close hooks at 1732438304208Disabling compacts and flushes for region at 1732438304208Disabling writes for close at 1732438304208Writing region close event to WAL at 1732438304214 (+6 ms)Running coprocessor post-close hooks at 1732438304218 (+4 ms)Closed at 1732438304218 2024-11-24T08:51:44,218 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732438271497.74de31bc5c2e2d20fe1a281b677a200f. 
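The HFileArchiver records above and below show that replaced store files are moved, not deleted: each file is renamed from the region's data directory to the mirrored path under .../archive, and only the follow-up quota report to the master fails here (harmlessly, since the RPC client is already stopped during shutdown). A hedged sketch of that move using the Hadoop FileSystem API; the helper name and path handling are illustrative, and the real archiver also deals with name collisions and retries:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HFileArchiveSketch {
      // Illustrative helper: move one store file under <rootDir>/archive,
      // preserving its data/default/<table>/<region>/<family>/<file> layout.
      static void archiveStoreFile(Configuration conf, Path rootDir, Path storeFile)
          throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        String root = rootDir.toUri().getPath();
        String relative = storeFile.toUri().getPath().substring(root.length() + 1);
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());
        if (!fs.rename(storeFile, archived)) {
          throw new IOException("Failed to archive " + storeFile + " to " + archived);
        }
      }
    }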
2024-11-24T08:51:44,219 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bfc44d74913e973cb7e0bc7d91d76b5e, disabling compactions & flushes 2024-11-24T08:51:44,219 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:44,219 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:44,219 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. after waiting 0 ms 2024-11-24T08:51:44,219 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:44,219 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f->hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/778da2ad8102f000e6527ce33247b88f/info/84b8ddb34028407a9729ff723a5c2847-top, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/3aea9ff02ad84640a887b787f621c025, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/bf2aec0fb6c8482ea2d261bd55082cf8, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/25b132620d7941cdb79956de896e4e3d, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d7780ba5dbb846328ca85f6778cc84f1, 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8852c8f897704eecb340e87dc6d35c49, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/354025f7c45a46b7848eb2585c479790, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/c058270569c24ef29a926b629522f81e, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1b05fda6356e4ed6b2562e0564e5539f, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f80b33389efd45af82144c3060abede7, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/e931c5b53d4f4de28cef2bf98c81d164, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1a311ebd95974e538350b81251381994, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/9b862f55c20b4235a4de0dd0885daf41, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f9b2d84d88a544dda681c8ef2b512e4a, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f6c5a49dc0594fe9a31cf500c77e72e7, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/22eaecfe597c4597a7ee0991d1117ea1, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/5be248d08eb6490b9f57f3d338724036, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/fa12a0fa3dc1441186a935b011d8bee2, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d6a6eeb2d7e342b793dfb40c1514306f, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/35060008655241c7b36d1ae1a7634e91, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/edb4aeb1f0a2454db285ce25acf0462e, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/ed85f7b860cb4b7585de569c7091c4aa] to archive 2024-11-24T08:51:44,220 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-24T08:51:44,221 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/84b8ddb34028407a9729ff723a5c2847.778da2ad8102f000e6527ce33247b88f 2024-11-24T08:51:44,223 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-d04aa409f1a54d419652ba9cd9039cf1 2024-11-24T08:51:44,224 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-c248268c80f04d76a87c7fc4f85bc13b 2024-11-24T08:51:44,224 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/3aea9ff02ad84640a887b787f621c025 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/3aea9ff02ad84640a887b787f621c025 2024-11-24T08:51:44,226 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/TestLogRolling-testLogRolling=778da2ad8102f000e6527ce33247b88f-fce9998727864933865500e725c4420d 2024-11-24T08:51:44,227 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/bf2aec0fb6c8482ea2d261bd55082cf8 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/bf2aec0fb6c8482ea2d261bd55082cf8 2024-11-24T08:51:44,228 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/25b132620d7941cdb79956de896e4e3d to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/25b132620d7941cdb79956de896e4e3d 2024-11-24T08:51:44,229 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d7780ba5dbb846328ca85f6778cc84f1 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d7780ba5dbb846328ca85f6778cc84f1 2024-11-24T08:51:44,230 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8852c8f897704eecb340e87dc6d35c49 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/8852c8f897704eecb340e87dc6d35c49 2024-11-24T08:51:44,231 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/354025f7c45a46b7848eb2585c479790 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/354025f7c45a46b7848eb2585c479790 2024-11-24T08:51:44,232 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/c058270569c24ef29a926b629522f81e to 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/c058270569c24ef29a926b629522f81e 2024-11-24T08:51:44,233 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1b05fda6356e4ed6b2562e0564e5539f to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1b05fda6356e4ed6b2562e0564e5539f 2024-11-24T08:51:44,234 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f80b33389efd45af82144c3060abede7 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f80b33389efd45af82144c3060abede7 2024-11-24T08:51:44,235 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/e931c5b53d4f4de28cef2bf98c81d164 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/e931c5b53d4f4de28cef2bf98c81d164 2024-11-24T08:51:44,236 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1a311ebd95974e538350b81251381994 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/1a311ebd95974e538350b81251381994 2024-11-24T08:51:44,237 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/9b862f55c20b4235a4de0dd0885daf41 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/9b862f55c20b4235a4de0dd0885daf41 2024-11-24T08:51:44,238 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f9b2d84d88a544dda681c8ef2b512e4a to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f9b2d84d88a544dda681c8ef2b512e4a 2024-11-24T08:51:44,239 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f6c5a49dc0594fe9a31cf500c77e72e7 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/f6c5a49dc0594fe9a31cf500c77e72e7 2024-11-24T08:51:44,240 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/22eaecfe597c4597a7ee0991d1117ea1 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/22eaecfe597c4597a7ee0991d1117ea1 2024-11-24T08:51:44,241 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/5be248d08eb6490b9f57f3d338724036 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/5be248d08eb6490b9f57f3d338724036 2024-11-24T08:51:44,242 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/fa12a0fa3dc1441186a935b011d8bee2 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/fa12a0fa3dc1441186a935b011d8bee2 2024-11-24T08:51:44,243 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d6a6eeb2d7e342b793dfb40c1514306f to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/d6a6eeb2d7e342b793dfb40c1514306f 2024-11-24T08:51:44,243 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/35060008655241c7b36d1ae1a7634e91 to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/35060008655241c7b36d1ae1a7634e91 2024-11-24T08:51:44,244 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/edb4aeb1f0a2454db285ce25acf0462e to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/edb4aeb1f0a2454db285ce25acf0462e 2024-11-24T08:51:44,245 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/ed85f7b860cb4b7585de569c7091c4aa to hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/archive/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/info/ed85f7b860cb4b7585de569c7091c4aa 2024-11-24T08:51:44,245 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3aea9ff02ad84640a887b787f621c025=43081, bf2aec0fb6c8482ea2d261bd55082cf8=12516, 25b132620d7941cdb79956de896e4e3d=66967, d7780ba5dbb846328ca85f6778cc84f1=21156, 8852c8f897704eecb340e87dc6d35c49=16828, 354025f7c45a46b7848eb2585c479790=91940, c058270569c24ef29a926b629522f81e=17906, 1b05fda6356e4ed6b2562e0564e5539f=24394, f80b33389efd45af82144c3060abede7=118996, e931c5b53d4f4de28cef2bf98c81d164=12516, 1a311ebd95974e538350b81251381994=16828, 9b862f55c20b4235a4de0dd0885daf41=141941, f9b2d84d88a544dda681c8ef2b512e4a=15750, f6c5a49dc0594fe9a31cf500c77e72e7=12518, 22eaecfe597c4597a7ee0991d1117ea1=166859, 5be248d08eb6490b9f57f3d338724036=22254, fa12a0fa3dc1441186a935b011d8bee2=17918, d6a6eeb2d7e342b793dfb40c1514306f=187470, 35060008655241c7b36d1ae1a7634e91=12523, edb4aeb1f0a2454db285ce25acf0462e=19013, ed85f7b860cb4b7585de569c7091c4aa=16839] 2024-11-24T08:51:44,249 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/data/default/TestLogRolling-testLogRolling/bfc44d74913e973cb7e0bc7d91d76b5e/recovered.edits/345.seqid, newMaxSeqId=345, maxSeqId=129 2024-11-24T08:51:44,249 INFO [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 
2024-11-24T08:51:44,249 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bfc44d74913e973cb7e0bc7d91d76b5e: Waiting for close lock at 1732438304219Running coprocessor pre-close hooks at 1732438304219Disabling compacts and flushes for region at 1732438304219Disabling writes for close at 1732438304219Writing region close event to WAL at 1732438304246 (+27 ms)Running coprocessor post-close hooks at 1732438304249 (+3 ms)Closed at 1732438304249 2024-11-24T08:51:44,250 DEBUG [RS_CLOSE_REGION-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732438271497.bfc44d74913e973cb7e0bc7d91d76b5e. 2024-11-24T08:51:44,410 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,37059,1732438246097; all regions closed. 2024-11-24T08:51:44,410 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,411 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,411 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,411 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,411 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741834_1010 (size=8107) 2024-11-24T08:51:44,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741834_1010 (size=8107) 2024-11-24T08:51:44,416 DEBUG [RS:0;469387a2cdb6:37059 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/oldWALs 2024-11-24T08:51:44,417 INFO [RS:0;469387a2cdb6:37059 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C37059%2C1732438246097.meta:.meta(num 1732438246992) 2024-11-24T08:51:44,417 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,417 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,417 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,417 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,418 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741881_1057 (size=780) 2024-11-24T08:51:44,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741881_1057 (size=780) 2024-11-24T08:51:44,423 DEBUG [RS:0;469387a2cdb6:37059 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/oldWALs 2024-11-24T08:51:44,423 INFO [RS:0;469387a2cdb6:37059 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C37059%2C1732438246097:(num 1732438304094) 2024-11-24T08:51:44,423 DEBUG [RS:0;469387a2cdb6:37059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:44,423 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:51:44,423 INFO [RS:0;469387a2cdb6:37059 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:51:44,423 INFO [RS:0;469387a2cdb6:37059 {}] hbase.ChoreService(370): Chore service for: 
regionserver/469387a2cdb6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T08:51:44,423 INFO [RS:0;469387a2cdb6:37059 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:51:44,423 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:51:44,423 INFO [RS:0;469387a2cdb6:37059 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37059 2024-11-24T08:51:44,425 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,37059,1732438246097 2024-11-24T08:51:44,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:51:44,425 INFO [RS:0;469387a2cdb6:37059 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:51:44,426 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,37059,1732438246097] 2024-11-24T08:51:44,427 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,37059,1732438246097 already deleted, retry=false 2024-11-24T08:51:44,427 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,37059,1732438246097 expired; onlineServers=0 2024-11-24T08:51:44,427 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '469387a2cdb6,34461,1732438246017' ***** 2024-11-24T08:51:44,427 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:51:44,427 INFO [M:0;469387a2cdb6:34461 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:51:44,427 INFO [M:0;469387a2cdb6:34461 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:51:44,427 DEBUG [M:0;469387a2cdb6:34461 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:51:44,427 DEBUG [M:0;469387a2cdb6:34461 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:51:44,427 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:51:44,428 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438246349 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438246349,5,FailOnTimeoutGroup] 2024-11-24T08:51:44,428 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438246345 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438246345,5,FailOnTimeoutGroup] 2024-11-24T08:51:44,428 INFO [M:0;469387a2cdb6:34461 {}] hbase.ChoreService(370): Chore service for: master/469387a2cdb6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:51:44,428 INFO [M:0;469387a2cdb6:34461 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:51:44,428 DEBUG [M:0;469387a2cdb6:34461 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:51:44,428 INFO [M:0;469387a2cdb6:34461 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:51:44,428 INFO [M:0;469387a2cdb6:34461 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:51:44,428 INFO [M:0;469387a2cdb6:34461 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:51:44,428 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:51:44,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:51:44,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:44,428 DEBUG [M:0;469387a2cdb6:34461 {}] zookeeper.ZKUtil(347): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:51:44,428 WARN [M:0;469387a2cdb6:34461 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:51:44,429 INFO [M:0;469387a2cdb6:34461 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/.lastflushedseqids 2024-11-24T08:51:44,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741882_1058 (size=228) 2024-11-24T08:51:44,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741882_1058 (size=228) 2024-11-24T08:51:44,434 INFO [M:0;469387a2cdb6:34461 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:51:44,434 INFO [M:0;469387a2cdb6:34461 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:51:44,434 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:51:44,435 INFO [M:0;469387a2cdb6:34461 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:44,435 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:44,435 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:51:44,435 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:44,435 INFO [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.71 KB heapSize=65.95 KB 2024-11-24T08:51:44,448 DEBUG [M:0;469387a2cdb6:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20ed9c47689c41efa143f1b2010e2f44 is 82, key is hbase:meta,,1/info:regioninfo/1732438247017/Put/seqid=0 2024-11-24T08:51:44,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741883_1059 (size=5672) 2024-11-24T08:51:44,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741883_1059 (size=5672) 2024-11-24T08:51:44,453 INFO [M:0;469387a2cdb6:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20ed9c47689c41efa143f1b2010e2f44 2024-11-24T08:51:44,471 DEBUG [M:0;469387a2cdb6:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f941993f1124427e88d6ba558019f1f7 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732438247517/Put/seqid=0 2024-11-24T08:51:44,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741884_1060 (size=7680) 2024-11-24T08:51:44,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741884_1060 (size=7680) 2024-11-24T08:51:44,476 INFO [M:0;469387a2cdb6:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.11 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f941993f1124427e88d6ba558019f1f7 2024-11-24T08:51:44,481 INFO [M:0;469387a2cdb6:34461 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f941993f1124427e88d6ba558019f1f7 2024-11-24T08:51:44,481 INFO [regionserver/469387a2cdb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:51:44,495 DEBUG [M:0;469387a2cdb6:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a1c7a21ba2b248a9a717845a6eedb5e6 is 69, key is 469387a2cdb6,37059,1732438246097/rs:state/1732438246455/Put/seqid=0 2024-11-24T08:51:44,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741885_1061 (size=5156) 2024-11-24T08:51:44,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741885_1061 (size=5156) 2024-11-24T08:51:44,500 INFO [M:0;469387a2cdb6:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a1c7a21ba2b248a9a717845a6eedb5e6 2024-11-24T08:51:44,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:44,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:51:44,524 DEBUG [M:0;469387a2cdb6:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f8b5300cc67f437abcfa90a111b98d56 is 52, key is load_balancer_on/state:d/1732438247137/Put/seqid=0 2024-11-24T08:51:44,526 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:44,526 INFO [RS:0;469387a2cdb6:37059 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:51:44,526 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x10070ed1ed10001, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:44,526 INFO [RS:0;469387a2cdb6:37059 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,37059,1732438246097; zookeeper connection closed. 
2024-11-24T08:51:44,527 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5a5c37c4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5a5c37c4 2024-11-24T08:51:44,527 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:51:44,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741886_1062 (size=5056) 2024-11-24T08:51:44,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741886_1062 (size=5056) 2024-11-24T08:51:44,528 INFO [M:0;469387a2cdb6:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f8b5300cc67f437abcfa90a111b98d56 2024-11-24T08:51:44,533 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20ed9c47689c41efa143f1b2010e2f44 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20ed9c47689c41efa143f1b2010e2f44 2024-11-24T08:51:44,537 INFO [M:0;469387a2cdb6:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20ed9c47689c41efa143f1b2010e2f44, entries=8, sequenceid=129, filesize=5.5 K 2024-11-24T08:51:44,538 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f941993f1124427e88d6ba558019f1f7 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f941993f1124427e88d6ba558019f1f7 2024-11-24T08:51:44,542 INFO [M:0;469387a2cdb6:34461 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f941993f1124427e88d6ba558019f1f7 2024-11-24T08:51:44,543 INFO [M:0;469387a2cdb6:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f941993f1124427e88d6ba558019f1f7, entries=14, sequenceid=129, filesize=7.5 K 2024-11-24T08:51:44,544 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a1c7a21ba2b248a9a717845a6eedb5e6 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a1c7a21ba2b248a9a717845a6eedb5e6 2024-11-24T08:51:44,548 INFO [M:0;469387a2cdb6:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a1c7a21ba2b248a9a717845a6eedb5e6, entries=1, sequenceid=129, filesize=5.0 K 2024-11-24T08:51:44,549 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f8b5300cc67f437abcfa90a111b98d56 as hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f8b5300cc67f437abcfa90a111b98d56 2024-11-24T08:51:44,553 INFO [M:0;469387a2cdb6:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45341/user/jenkins/test-data/f5d29d9e-5e7a-989c-2089-46e0a300bf53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f8b5300cc67f437abcfa90a111b98d56, entries=1, sequenceid=129, filesize=4.9 K 2024-11-24T08:51:44,554 INFO [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.89 KB/67472, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=129, compaction requested=false 2024-11-24T08:51:44,556 INFO [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:44,556 DEBUG [M:0;469387a2cdb6:34461 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438304434Disabling compacts and flushes for region at 1732438304434Disabling writes for close at 1732438304435 (+1 ms)Obtaining lock to block concurrent updates at 1732438304435Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732438304435Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54997, getHeapSize=67472, getOffHeapSize=0, getCellsCount=152 at 1732438304435Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732438304436 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732438304436Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732438304448 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732438304448Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732438304457 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732438304471 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732438304471Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732438304481 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732438304495 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732438304495Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732438304504 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732438304523 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732438304523Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ac56a15: reopening flushed file at 1732438304532 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2223ae07: reopening flushed file at 1732438304537 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c418559: reopening flushed file at 1732438304543 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2738b8f3: reopening flushed file at 1732438304548 (+5 ms)Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.89 KB/67472, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=129, compaction requested=false at 1732438304554 (+6 ms)Writing region close event to WAL at 1732438304555 (+1 ms)Closed at 1732438304555 2024-11-24T08:51:44,556 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,556 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,556 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,556 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,556 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:44,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37939 is added to blk_1073741830_1006 (size=63927) 2024-11-24T08:51:44,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741830_1006 (size=63927) 2024-11-24T08:51:44,559 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:51:44,559 INFO [M:0;469387a2cdb6:34461 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T08:51:44,559 INFO [M:0;469387a2cdb6:34461 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34461 2024-11-24T08:51:44,559 INFO [M:0;469387a2cdb6:34461 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:51:44,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:44,660 INFO [M:0;469387a2cdb6:34461 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:51:44,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x10070ed1ed10000, quorum=127.0.0.1:55564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:44,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5baa5e87{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:51:44,663 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@240b8237{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:51:44,663 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:51:44,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c149881{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:51:44,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74b22f54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir/,STOPPED} 2024-11-24T08:51:44,665 WARN [BP-619707123-172.17.0.2-1732438245419 heartbeating to localhost/127.0.0.1:45341 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:51:44,665 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:51:44,665 WARN [BP-619707123-172.17.0.2-1732438245419 heartbeating to localhost/127.0.0.1:45341 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619707123-172.17.0.2-1732438245419 (Datanode Uuid f396ea77-d025-489a-b6b6-9445118fba37) service to localhost/127.0.0.1:45341 2024-11-24T08:51:44,665 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:51:44,665 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data3/current/BP-619707123-172.17.0.2-1732438245419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:44,665 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data4/current/BP-619707123-172.17.0.2-1732438245419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:44,665 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:51:44,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5492219{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:51:44,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@231b9bd6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:51:44,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:51:44,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@384851d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:51:44,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53da1c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir/,STOPPED} 2024-11-24T08:51:44,669 WARN [BP-619707123-172.17.0.2-1732438245419 heartbeating to localhost/127.0.0.1:45341 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:51:44,669 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:51:44,669 WARN [BP-619707123-172.17.0.2-1732438245419 heartbeating to localhost/127.0.0.1:45341 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619707123-172.17.0.2-1732438245419 (Datanode Uuid 7ef7d38a-49f5-46b5-99ef-212199663221) service to localhost/127.0.0.1:45341 2024-11-24T08:51:44,669 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:51:44,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data1/current/BP-619707123-172.17.0.2-1732438245419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:44,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/cluster_d32e16b6-9103-829c-f8cb-579260fe909a/data/data2/current/BP-619707123-172.17.0.2-1732438245419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:44,670 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:51:44,675 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d9cf385{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:51:44,675 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35058e3e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:51:44,675 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:51:44,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79af711a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:51:44,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@547e6321{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir/,STOPPED} 2024-11-24T08:51:44,682 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:51:44,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:51:44,726 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 205) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45341 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45341 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45341 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45341 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45341 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45341 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45341 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45341 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=134 (was 116) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=1363 (was 1501) 2024-11-24T08:51:44,733 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=134, ProcessCount=11, AvailableMemoryMB=1363 2024-11-24T08:51:44,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:51:44,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.log.dir so I do NOT create it in target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16 2024-11-24T08:51:44,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fe43f61-4e6d-c150-4a5d-200ea7791561/hadoop.tmp.dir so I do NOT create it in target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16 2024-11-24T08:51:44,733 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b, deleteOnExit=true 2024-11-24T08:51:44,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:51:44,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/test.cache.data in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:51:44,734 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:51:44,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:51:44,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:51:44,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:51:44,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:51:44,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:51:44,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:51:44,746 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:51:44,792 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:51:44,812 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:51:44,814 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:51:44,814 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:51:44,814 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:51:44,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:51:44,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f31fc1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:51:44,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2474cb91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:51:44,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@273b5f82{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/java.io.tmpdir/jetty-localhost-40643-hadoop-hdfs-3_4_1-tests_jar-_-any-13675385398515755583/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:51:44,908 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2834d4ee{HTTP/1.1, (http/1.1)}{localhost:40643} 2024-11-24T08:51:44,908 INFO [Time-limited test {}] server.Server(415): Started @295525ms 2024-11-24T08:51:44,919 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:51:44,985 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:51:44,989 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:51:44,990 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:51:44,990 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:51:44,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:51:44,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3de3f342{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:51:44,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cb23f43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:51:45,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b42ae65{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/java.io.tmpdir/jetty-localhost-41179-hadoop-hdfs-3_4_1-tests_jar-_-any-9011548015241194463/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:51:45,123 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4dfe66f3{HTTP/1.1, (http/1.1)}{localhost:41179} 2024-11-24T08:51:45,124 INFO [Time-limited test {}] server.Server(415): Started @295740ms 2024-11-24T08:51:45,125 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:51:45,190 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:51:45,193 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:51:45,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:51:45,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:51:45,197 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:51:45,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ca0473d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:51:45,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fbc1bd8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:51:45,214 WARN [Thread-2490 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data2/current/BP-1298695448-172.17.0.2-1732438304750/current, will proceed with Du for space computation calculation, 2024-11-24T08:51:45,214 WARN [Thread-2489 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data1/current/BP-1298695448-172.17.0.2-1732438304750/current, will proceed with Du for space computation calculation, 2024-11-24T08:51:45,235 WARN [Thread-2468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:51:45,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42f8ec8e90ccd942 with lease ID 0xc98339b87f6d7e78: Processing first storage report for DS-3b4eef2c-4fd2-4d30-956a-0112522301eb from datanode DatanodeRegistration(127.0.0.1:36869, datanodeUuid=a23a109e-2c09-45a3-b680-0040e3685271, infoPort=45315, infoSecurePort=0, ipcPort=39767, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750) 2024-11-24T08:51:45,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42f8ec8e90ccd942 with lease ID 0xc98339b87f6d7e78: from storage DS-3b4eef2c-4fd2-4d30-956a-0112522301eb node DatanodeRegistration(127.0.0.1:36869, datanodeUuid=a23a109e-2c09-45a3-b680-0040e3685271, infoPort=45315, infoSecurePort=0, ipcPort=39767, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:51:45,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42f8ec8e90ccd942 with lease ID 0xc98339b87f6d7e78: Processing first storage report for DS-19abaf3c-8b29-4479-b612-745ff0076b35 from datanode DatanodeRegistration(127.0.0.1:36869, datanodeUuid=a23a109e-2c09-45a3-b680-0040e3685271, infoPort=45315, infoSecurePort=0, ipcPort=39767, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750) 2024-11-24T08:51:45,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42f8ec8e90ccd942 with lease ID 0xc98339b87f6d7e78: from storage DS-19abaf3c-8b29-4479-b612-745ff0076b35 node DatanodeRegistration(127.0.0.1:36869, datanodeUuid=a23a109e-2c09-45a3-b680-0040e3685271, infoPort=45315, infoSecurePort=0, ipcPort=39767, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:51:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:51:45,318 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:51:45,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T08:51:45,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T08:51:45,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ab6850e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/java.io.tmpdir/jetty-localhost-42407-hadoop-hdfs-3_4_1-tests_jar-_-any-7408854229740055093/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:51:45,327 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ebfc7ad{HTTP/1.1, (http/1.1)}{localhost:42407} 
2024-11-24T08:51:45,327 INFO [Time-limited test {}] server.Server(415): Started @295944ms 2024-11-24T08:51:45,328 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:51:45,406 WARN [Thread-2516 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data4/current/BP-1298695448-172.17.0.2-1732438304750/current, will proceed with Du for space computation calculation, 2024-11-24T08:51:45,406 WARN [Thread-2515 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data3/current/BP-1298695448-172.17.0.2-1732438304750/current, will proceed with Du for space computation calculation, 2024-11-24T08:51:45,428 WARN [Thread-2504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:51:45,431 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x557c66a5cb8c9d2c with lease ID 0xc98339b87f6d7e79: Processing first storage report for DS-0b3d2bb1-5fef-427d-9654-98fcee0f46b6 from datanode DatanodeRegistration(127.0.0.1:44197, datanodeUuid=b83f2aaf-7443-4456-9079-2b0616a6bf83, infoPort=40169, infoSecurePort=0, ipcPort=42205, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750) 2024-11-24T08:51:45,431 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x557c66a5cb8c9d2c with lease ID 0xc98339b87f6d7e79: from storage DS-0b3d2bb1-5fef-427d-9654-98fcee0f46b6 node DatanodeRegistration(127.0.0.1:44197, datanodeUuid=b83f2aaf-7443-4456-9079-2b0616a6bf83, infoPort=40169, infoSecurePort=0, ipcPort=42205, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:51:45,431 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x557c66a5cb8c9d2c with lease ID 0xc98339b87f6d7e79: Processing first storage report for DS-bc4ca32d-1811-4772-84c2-4dcd8dab630a from datanode DatanodeRegistration(127.0.0.1:44197, datanodeUuid=b83f2aaf-7443-4456-9079-2b0616a6bf83, infoPort=40169, infoSecurePort=0, ipcPort=42205, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750) 2024-11-24T08:51:45,431 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x557c66a5cb8c9d2c with lease ID 0xc98339b87f6d7e79: from storage DS-bc4ca32d-1811-4772-84c2-4dcd8dab630a node DatanodeRegistration(127.0.0.1:44197, datanodeUuid=b83f2aaf-7443-4456-9079-2b0616a6bf83, infoPort=40169, infoSecurePort=0, ipcPort=42205, storageInfo=lv=-57;cid=testClusterID;nsid=1739120313;c=1732438304750), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:51:45,466 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16 2024-11-24T08:51:45,473 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/zookeeper_0, clientPort=60370, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:51:45,475 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60370 2024-11-24T08:51:45,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:45,476 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:45,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:51:45,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:51:45,489 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24 with version=8 2024-11-24T08:51:45,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34511/user/jenkins/test-data/8f8e7194-5e4d-e22f-7a4c-49d5624831c9/hbase-staging 2024-11-24T08:51:45,491 INFO [Time-limited test {}] client.ConnectionUtils(128): master/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:51:45,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:51:45,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:51:45,491 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:51:45,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:51:45,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:51:45,492 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, 
hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:51:45,492 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:51:45,492 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37723 2024-11-24T08:51:45,493 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37723 connecting to ZooKeeper ensemble=127.0.0.1:60370 2024-11-24T08:51:45,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:377230x0, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:51:45,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37723-0x10070ee072b0000 connected 2024-11-24T08:51:45,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:45,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:45,531 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:45,532 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:45,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:51:45,535 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24, hbase.cluster.distributed=false 2024-11-24T08:51:45,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:51:45,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37723 2024-11-24T08:51:45,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37723 2024-11-24T08:51:45,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37723 2024-11-24T08:51:45,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37723 2024-11-24T08:51:45,543 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37723 2024-11-24T08:51:45,560 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/469387a2cdb6:0 server-side Connection retries=45 2024-11-24T08:51:45,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:51:45,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:51:45,560 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:51:45,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:51:45,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:51:45,560 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:51:45,560 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
2024-11-24T08:51:45,561 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40605 2024-11-24T08:51:45,562 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40605 connecting to ZooKeeper ensemble=127.0.0.1:60370 2024-11-24T08:51:45,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:45,565 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:45,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406050x0, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:51:45,570 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40605-0x10070ee072b0001 connected 2024-11-24T08:51:45,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:51:45,570 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:51:45,572 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:51:45,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:51:45,574 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:51:45,577 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40605 2024-11-24T08:51:45,577 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40605 2024-11-24T08:51:45,578 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40605 2024-11-24T08:51:45,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40605 2024-11-24T08:51:45,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40605 2024-11-24T08:51:45,603 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;469387a2cdb6:37723 2024-11-24T08:51:45,603 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/469387a2cdb6,37723,1732438305491 2024-11-24T08:51:45,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:51:45,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:51:45,605 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/469387a2cdb6,37723,1732438305491 2024-11-24T08:51:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:51:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,607 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:51:45,607 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/469387a2cdb6,37723,1732438305491 from backup master directory 2024-11-24T08:51:45,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:51:45,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/469387a2cdb6,37723,1732438305491 2024-11-24T08:51:45,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:51:45,608 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:51:45,608 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=469387a2cdb6,37723,1732438305491 2024-11-24T08:51:45,614 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/hbase.id] with ID: 143e8561-3cb2-4244-b1e0-1530970a7b87 2024-11-24T08:51:45,614 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/.tmp/hbase.id 2024-11-24T08:51:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:51:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:51:45,629 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/.tmp/hbase.id]:[hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/hbase.id] 2024-11-24T08:51:45,643 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:45,644 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:51:45,645 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
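The FSUtils entries above write the cluster ID to a temporary file and then move it to its final location, so readers never observe a half-written hbase.id. A rough stand-alone equivalent of that write-then-rename pattern with the Hadoop FileSystem API is sketched below; the namenode address, paths, and ID value are illustrative only.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder namenode
            FileSystem fs = FileSystem.get(conf);

            Path tmp = new Path("/hbase/.tmp/hbase.id");
            Path target = new Path("/hbase/hbase.id");
            String clusterId = java.util.UUID.randomUUID().toString();

            // Write the ID to the temporary location first ...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(clusterId.getBytes(StandardCharsets.UTF_8));
            }
            // ... then rename it into place so the published file appears all at once.
            if (!fs.rename(tmp, target)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
            }
            System.out.println("published cluster id " + clusterId);
        }
    }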
2024-11-24T08:51:45,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:51:45,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:51:45,663 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:51:45,664 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:51:45,667 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:51:45,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:51:45,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:51:45,676 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store 2024-11-24T08:51:45,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:51:45,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:51:45,687 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:51:45,687 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:51:45,687 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:45,687 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:45,687 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:51:45,687 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:45,687 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
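The descriptor dumps above (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, and so on) are rendered TableDescriptor/ColumnFamilyDescriptor objects. For orientation, a family with the same attributes as the logged 'info' family of master:store could be declared roughly as below with the HBase 2.x builder API; the table name is a made-up example and the setter selection is a best-effort recollection of that API, not code quoted from this test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor exampleDescriptor() {
            // Mirrors the logged 'info' family: 3 versions, in-memory, 8 KB blocks,
            // ROWCOL bloom filter, ROW_INDEX_V1 block encoding, no compression.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .build())
                .build();
        }

        public static void main(String[] args) {
            System.out.println(exampleDescriptor());
        }
    }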
2024-11-24T08:51:45,688 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438305687Disabling compacts and flushes for region at 1732438305687Disabling writes for close at 1732438305687Writing region close event to WAL at 1732438305687Closed at 1732438305687 2024-11-24T08:51:45,688 WARN [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/.initializing 2024-11-24T08:51:45,688 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/WALs/469387a2cdb6,37723,1732438305491 2024-11-24T08:51:45,691 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C37723%2C1732438305491, suffix=, logDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/WALs/469387a2cdb6,37723,1732438305491, archiveDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/oldWALs, maxLogs=10 2024-11-24T08:51:45,691 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C37723%2C1732438305491.1732438305691 2024-11-24T08:51:45,697 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/WALs/469387a2cdb6,37723,1732438305491/469387a2cdb6%2C37723%2C1732438305491.1732438305691 2024-11-24T08:51:45,699 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40169:40169),(127.0.0.1/127.0.0.1:45315:45315)] 2024-11-24T08:51:45,702 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:51:45,702 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:51:45,702 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,702 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:51:45,705 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:45,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:51:45,707 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:51:45,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:51:45,708 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,709 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:51:45,709 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:51:45,710 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:51:45,710 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,711 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,711 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,712 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,712 DEBUG [master/469387a2cdb6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,713 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:51:45,714 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:51:45,716 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:51:45,716 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769353, jitterRate=-0.021717488765716553}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:51:45,717 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732438305702Initializing all the Stores at 1732438305703 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438305703Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438305704 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438305704Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438305704Cleaning up temporary data from old regions at 1732438305712 (+8 ms)Region opened successfully at 1732438305717 (+5 ms) 2024-11-24T08:51:45,717 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:51:45,719 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61b81a50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:51:45,720 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:51:45,720 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:51:45,721 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:51:45,721 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:51:45,721 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:51:45,722 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:51:45,722 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:51:45,724 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:51:45,725 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:51:45,726 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:51:45,726 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:51:45,727 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:51:45,727 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:51:45,727 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:51:45,728 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:51:45,729 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:51:45,730 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:51:45,730 DEBUG 
[master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:51:45,732 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:51:45,733 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:51:45,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:51:45,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:51:45,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,734 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=469387a2cdb6,37723,1732438305491, sessionid=0x10070ee072b0000, setting cluster-up flag (Was=false) 2024-11-24T08:51:45,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,738 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:51:45,739 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,37723,1732438305491 2024-11-24T08:51:45,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:45,743 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:51:45,744 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=469387a2cdb6,37723,1732438305491 2024-11-24T08:51:45,744 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:51:45,746 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:51:45,746 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:51:45,746 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:51:45,746 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 469387a2cdb6,37723,1732438305491 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:51:45,747 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:51:45,747 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:51:45,747 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:51:45,747 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/469387a2cdb6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:51:45,748 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/469387a2cdb6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:51:45,748 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,748 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:51:45,748 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/469387a2cdb6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:51:45,749 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:51:45,749 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:51:45,750 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732438335750 2024-11-24T08:51:45,750 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:51:45,750 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:51:45,751 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:51:45,751 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:51:45,752 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:51:45,752 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:51:45,752 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438305752,5,FailOnTimeoutGroup] 2024-11-24T08:51:45,752 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438305752,5,FailOnTimeoutGroup] 2024-11-24T08:51:45,752 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,752 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:51:45,752 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,752 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
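The ChoreService entries above record periodic background tasks (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). As a rough plain-Java analogy, not the actual ScheduledChore API, the scheduling amounts to a named fixed-rate task like the sketch below.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
        public static void main(String[] args) {
            // Non-daemon thread keeps the JVM alive so the periodic task keeps firing.
            ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor(
                    r -> new Thread(r, "LogsCleaner")); // named roughly like the logged chore
            // Run the cleanup every 600000 ms, matching the logged LogsCleaner period.
            chores.scheduleAtFixedRate(
                    () -> System.out.println("cleaning old WALs (placeholder work)"),
                    0, 600_000, TimeUnit.MILLISECONDS);
        }
    }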
2024-11-24T08:51:45,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:51:45,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:51:45,759 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:51:45,759 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24 2024-11-24T08:51:45,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:51:45,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:51:45,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:51:45,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:51:45,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:51:45,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:45,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:51:45,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:51:45,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:45,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:51:45,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:51:45,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:45,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:51:45,775 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:51:45,775 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:45,776 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:45,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:51:45,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740 2024-11-24T08:51:45,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740 2024-11-24T08:51:45,778 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:51:45,778 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:51:45,778 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
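The FlushLargeStoresPolicy message above derives its per-family lower bound as the region's memstore flush size divided by the number of column families: the 16 MB figure here is consistent with a 64 MB flush size spread over the four families of hbase:meta (info, ns, rep_barrier, table), just as the earlier 32 MB figure for master:store matches its logged flushSize=134217728 over four families. A quick check of that arithmetic (the 64 MB meta flush size is inferred, not logged directly):

    public class FlushLowerBoundCheck {
        public static void main(String[] args) {
            long metaFlushSize = 64L * 1024 * 1024;    // assumed flush size for hbase:meta in this test
            long storeFlushSize = 128L * 1024 * 1024;  // flushSize=134217728 logged for master:store
            int families = 4;                          // both tables carry four column families
            System.out.println(metaFlushSize / families);   // 16777216 -> the logged 16.0 M lower bound
            System.out.println(storeFlushSize / families);  // 33554432 -> the logged 32.0 M lower bound
        }
    }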
2024-11-24T08:51:45,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:51:45,781 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:51:45,781 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839804, jitterRate=0.06786656379699707}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:51:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732438305768Initializing all the Stores at 1732438305769 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438305769Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438305769Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438305769Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438305769Cleaning up temporary data from old regions at 1732438305778 (+9 ms)Region opened successfully at 1732438305781 (+3 ms) 2024-11-24T08:51:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:51:45,782 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:51:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:51:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:51:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:51:45,782 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:51:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438305782Disabling compacts and flushes for region at 1732438305782Disabling writes for close at 1732438305782Writing region close 
event to WAL at 1732438305782Closed at 1732438305782 2024-11-24T08:51:45,783 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:51:45,783 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:51:45,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:51:45,784 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:51:45,785 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:51:45,786 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(746): ClusterId : 143e8561-3cb2-4244-b1e0-1530970a7b87 2024-11-24T08:51:45,786 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:51:45,788 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:51:45,788 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:51:45,789 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:51:45,789 DEBUG [RS:0;469387a2cdb6:40605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67be9742, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=469387a2cdb6/172.17.0.2:0 2024-11-24T08:51:45,800 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;469387a2cdb6:40605 2024-11-24T08:51:45,800 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:51:45,800 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:51:45,800 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(832): About to register with Master. 
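The RS:0 thread above picks up ClusterId 143e8561-3cb2-4244-b1e0-1530970a7b87 and prepares to register with the master. From the outside, a client can observe the same cluster id and the set of live region servers through the standard Admin API; a minimal sketch is below, assuming the usual quorum and client-port settings (the values shown are placeholders, not the ephemeral ports from this test run).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterInfoSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // placeholder quorum
            conf.setInt("hbase.zookeeper.property.clientPort", 2181); // placeholder port

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("cluster id: " + metrics.getClusterId());
                System.out.println("live region servers: "
                        + metrics.getLiveServerMetrics().keySet());
            }
        }
    }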
2024-11-24T08:51:45,801 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(2659): reportForDuty to master=469387a2cdb6,37723,1732438305491 with port=40605, startcode=1732438305559 2024-11-24T08:51:45,801 DEBUG [RS:0;469387a2cdb6:40605 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:51:45,803 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48667, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:51:45,804 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37723 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 469387a2cdb6,40605,1732438305559 2024-11-24T08:51:45,804 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37723 {}] master.ServerManager(517): Registering regionserver=469387a2cdb6,40605,1732438305559 2024-11-24T08:51:45,805 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24 2024-11-24T08:51:45,805 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40803 2024-11-24T08:51:45,805 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:51:45,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:51:45,807 DEBUG [RS:0;469387a2cdb6:40605 {}] zookeeper.ZKUtil(111): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/469387a2cdb6,40605,1732438305559 2024-11-24T08:51:45,807 WARN [RS:0;469387a2cdb6:40605 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:51:45,807 INFO [RS:0;469387a2cdb6:40605 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:51:45,807 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/469387a2cdb6,40605,1732438305559 2024-11-24T08:51:45,807 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [469387a2cdb6,40605,1732438305559] 2024-11-24T08:51:45,810 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:51:45,811 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:51:45,812 INFO [RS:0;469387a2cdb6:40605 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:51:45,812 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
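The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with the usual sizing rule: the global limit is roughly 40% of the heap, which would put this JVM's heap near 2.2 GB, and the low-water mark is 95% of that limit, since 880 MB x 0.95 = 836 MB. The fractions quoted here are best-effort assumptions about the stock configuration defaults rather than values read from this test's site files. A quick numeric check:

    public class MemstoreLimitCheck {
        public static void main(String[] args) {
            double heapMb = 2200;           // inferred from the logged limit, not logged directly
            double globalFraction = 0.4;    // assumed default global memstore fraction
            double lowMarkFraction = 0.95;  // assumed default low-water-mark fraction

            double limitMb = heapMb * globalFraction;     // ~880 MB, as logged
            double lowMarkMb = limitMb * lowMarkFraction; // 836 MB, as logged
            System.out.printf("limit=%.0f MB, lowMark=%.0f MB%n", limitMb, lowMarkMb);
        }
    }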
2024-11-24T08:51:45,812 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:51:45,812 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:51:45,813 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/469387a2cdb6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/469387a2cdb6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:51:45,813 DEBUG [RS:0;469387a2cdb6:40605 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/469387a2cdb6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:51:45,814 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:51:45,814 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,814 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,814 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,814 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,814 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,40605,1732438305559-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:51:45,833 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:51:45,833 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,40605,1732438305559-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,833 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,833 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.Replication(171): 469387a2cdb6,40605,1732438305559 started 2024-11-24T08:51:45,851 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:45,851 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1482): Serving as 469387a2cdb6,40605,1732438305559, RpcServer on 469387a2cdb6/172.17.0.2:40605, sessionid=0x10070ee072b0001 2024-11-24T08:51:45,851 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:51:45,851 DEBUG [RS:0;469387a2cdb6:40605 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 469387a2cdb6,40605,1732438305559 2024-11-24T08:51:45,851 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,40605,1732438305559' 2024-11-24T08:51:45,851 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:51:45,852 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:51:45,852 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:51:45,852 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:51:45,852 DEBUG [RS:0;469387a2cdb6:40605 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 469387a2cdb6,40605,1732438305559 2024-11-24T08:51:45,852 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '469387a2cdb6,40605,1732438305559' 2024-11-24T08:51:45,852 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:51:45,853 DEBUG 
[RS:0;469387a2cdb6:40605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:51:45,853 DEBUG [RS:0;469387a2cdb6:40605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:51:45,853 INFO [RS:0;469387a2cdb6:40605 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:51:45,853 INFO [RS:0;469387a2cdb6:40605 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:51:45,935 WARN [469387a2cdb6:37723 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:51:45,955 INFO [RS:0;469387a2cdb6:40605 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C40605%2C1732438305559, suffix=, logDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/469387a2cdb6,40605,1732438305559, archiveDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/oldWALs, maxLogs=32 2024-11-24T08:51:45,956 INFO [RS:0;469387a2cdb6:40605 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40605%2C1732438305559.1732438305956 2024-11-24T08:51:45,961 INFO [RS:0;469387a2cdb6:40605 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/469387a2cdb6,40605,1732438305559/469387a2cdb6%2C40605%2C1732438305559.1732438305956 2024-11-24T08:51:45,967 DEBUG [RS:0;469387a2cdb6:40605 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45315:45315),(127.0.0.1/127.0.0.1:40169:40169)] 2024-11-24T08:51:46,185 DEBUG [469387a2cdb6:37723 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:51:46,186 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=469387a2cdb6,40605,1732438305559 2024-11-24T08:51:46,188 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,40605,1732438305559, state=OPENING 2024-11-24T08:51:46,190 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:51:46,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:46,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:46,192 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:51:46,193 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:51:46,193 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:51:46,193 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,40605,1732438305559}] 2024-11-24T08:51:46,347 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:51:46,349 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38065, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:51:46,354 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:51:46,354 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:51:46,356 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=469387a2cdb6%2C40605%2C1732438305559.meta, suffix=.meta, logDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/469387a2cdb6,40605,1732438305559, archiveDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/oldWALs, maxLogs=32 2024-11-24T08:51:46,357 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 469387a2cdb6%2C40605%2C1732438305559.meta.1732438306357.meta 2024-11-24T08:51:46,363 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/469387a2cdb6,40605,1732438305559/469387a2cdb6%2C40605%2C1732438305559.meta.1732438306357.meta 2024-11-24T08:51:46,365 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40169:40169),(127.0.0.1/127.0.0.1:45315:45315)] 2024-11-24T08:51:46,368 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:51:46,368 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:51:46,368 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:51:46,369 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T08:51:46,369 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:51:46,369 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:51:46,369 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:51:46,369 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:51:46,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:51:46,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:51:46,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:46,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:46,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:51:46,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:51:46,372 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:46,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:46,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:51:46,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:51:46,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:46,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:51:46,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:51:46,375 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:51:46,375 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:51:46,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:51:46,375 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:51:46,376 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740 2024-11-24T08:51:46,377 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740 2024-11-24T08:51:46,378 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:51:46,378 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:51:46,378 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:51:46,379 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:51:46,380 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835839, jitterRate=0.06282457709312439}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:51:46,380 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:51:46,380 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732438306369Writing region info on filesystem at 1732438306369Initializing all the Stores at 1732438306370 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438306370Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438306370Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732438306370Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732438306370Cleaning up temporary data from old regions at 1732438306378 (+8 ms)Running coprocessor post-open hooks at 1732438306380 (+2 ms)Region opened successfully at 1732438306380 2024-11-24T08:51:46,381 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732438306347 2024-11-24T08:51:46,383 DEBUG [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:51:46,383 INFO [RS_OPEN_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:51:46,384 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=469387a2cdb6,40605,1732438305559 2024-11-24T08:51:46,385 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 469387a2cdb6,40605,1732438305559, state=OPEN 2024-11-24T08:51:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:51:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:51:46,387 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=469387a2cdb6,40605,1732438305559 2024-11-24T08:51:46,387 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:51:46,387 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:51:46,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:51:46,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=469387a2cdb6,40605,1732438305559 in 194 msec 2024-11-24T08:51:46,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:51:46,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-24T08:51:46,392 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:51:46,392 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:51:46,393 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:51:46,393 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,40605,1732438305559, seqNum=-1] 2024-11-24T08:51:46,393 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:51:46,394 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38619, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:51:46,398 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 652 msec 2024-11-24T08:51:46,398 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732438306398, completionTime=-1 2024-11-24T08:51:46,398 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:51:46,398 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732438366400 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732438426400 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37723,1732438305491-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37723,1732438305491-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37723,1732438305491-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-469387a2cdb6:37723, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:51:46,400 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:46,401 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:51:46,402 DEBUG [master/469387a2cdb6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37723,1732438305491-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:51:46,404 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37723,1732438305491-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:51:46,406 DEBUG [master/469387a2cdb6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:51:46,406 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:51:46,406 INFO [master/469387a2cdb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=469387a2cdb6,37723,1732438305491-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:51:46,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3300a62a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:51:46,488 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 469387a2cdb6,37723,-1 for getting cluster id 2024-11-24T08:51:46,488 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:51:46,491 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '143e8561-3cb2-4244-b1e0-1530970a7b87' 2024-11-24T08:51:46,491 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:51:46,492 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "143e8561-3cb2-4244-b1e0-1530970a7b87" 2024-11-24T08:51:46,492 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5487b8e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:51:46,492 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [469387a2cdb6,37723,-1] 2024-11-24T08:51:46,493 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:51:46,493 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:46,495 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34600, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:51:46,496 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1750b3d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:51:46,496 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:51:46,497 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=469387a2cdb6,40605,1732438305559, seqNum=-1] 2024-11-24T08:51:46,497 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:51:46,498 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:51:46,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=469387a2cdb6,37723,1732438305491 2024-11-24T08:51:46,500 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:51:46,503 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:51:46,503 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:51:46,506 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/oldWALs, maxLogs=32 2024-11-24T08:51:46,506 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732438306506 2024-11-24T08:51:46,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,40901,1732438114603/469387a2cdb6%2C40901%2C1732438114603.meta.1732438115380.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:46,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36097/user/jenkins/test-data/7192435c-1923-e222-b897-c8ef87db0f77/WALs/469387a2cdb6,39681,1732438115472/469387a2cdb6%2C39681%2C1732438115472.1732438115667 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:51:46,512 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/test.com,8080,1/test.com%2C8080%2C1.1732438306506 2024-11-24T08:51:46,514 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45315:45315),(127.0.0.1/127.0.0.1:40169:40169)] 2024-11-24T08:51:46,515 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732438306515 2024-11-24T08:51:46,521 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,521 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,521 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,521 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,521 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,521 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/test.com,8080,1/test.com%2C8080%2C1.1732438306506 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/test.com,8080,1/test.com%2C8080%2C1.1732438306515 2024-11-24T08:51:46,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40169:40169),(127.0.0.1/127.0.0.1:45315:45315)] 2024-11-24T08:51:46,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/test.com,8080,1/test.com%2C8080%2C1.1732438306506 is not closed yet, will try archiving it next time 2024-11-24T08:51:46,523 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,524 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,524 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741835_1011 (size=93) 2024-11-24T08:51:46,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741835_1011 (size=93) 2024-11-24T08:51:46,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,525 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/WALs/test.com,8080,1/test.com%2C8080%2C1.1732438306506 to hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/oldWALs/test.com%2C8080%2C1.1732438306506 2024-11-24T08:51:46,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741836_1012 (size=93) 2024-11-24T08:51:46,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741836_1012 (size=93) 2024-11-24T08:51:46,529 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/oldWALs 2024-11-24T08:51:46,529 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732438306515) 2024-11-24T08:51:46,529 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:51:46,529 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:51:46,529 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:51:46,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:46,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:46,529 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:51:46,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:51:46,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1678478337, stopped=false 2024-11-24T08:51:46,529 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=469387a2cdb6,37723,1732438305491 2024-11-24T08:51:46,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:51:46,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:51:46,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:46,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:46,530 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:51:46,530 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:51:46,531 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:51:46,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:46,531 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '469387a2cdb6,40605,1732438305559' ***** 2024-11-24T08:51:46,531 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:51:46,531 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:51:46,531 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:51:46,531 INFO [RS:0;469387a2cdb6:40605 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:51:46,531 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:51:46,531 INFO [RS:0;469387a2cdb6:40605 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:51:46,531 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(959): stopping server 469387a2cdb6,40605,1732438305559 2024-11-24T08:51:46,531 INFO [RS:0;469387a2cdb6:40605 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:51:46,531 INFO [RS:0;469387a2cdb6:40605 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;469387a2cdb6:40605. 
2024-11-24T08:51:46,532 DEBUG [RS:0;469387a2cdb6:40605 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:51:46,532 DEBUG [RS:0;469387a2cdb6:40605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:46,532 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:51:46,532 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:51:46,532 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T08:51:46,532 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:51:46,532 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T08:51:46,532 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:51:46,532 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:51:46,532 DEBUG [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T08:51:46,532 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:51:46,532 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:51:46,532 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:51:46,532 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:51:46,532 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:51:46,532 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T08:51:46,547 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740/.tmp/ns/eefcd6bd2c7e42c0a19ff799c8a15f6e is 43, key is default/ns:d/1732438306394/Put/seqid=0 2024-11-24T08:51:46,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741837_1013 (size=5153) 2024-11-24T08:51:46,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741837_1013 (size=5153) 2024-11-24T08:51:46,552 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740/.tmp/ns/eefcd6bd2c7e42c0a19ff799c8a15f6e 2024-11-24T08:51:46,558 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740/.tmp/ns/eefcd6bd2c7e42c0a19ff799c8a15f6e as hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740/ns/eefcd6bd2c7e42c0a19ff799c8a15f6e 2024-11-24T08:51:46,562 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740/ns/eefcd6bd2c7e42c0a19ff799c8a15f6e, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T08:51:46,563 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-11-24T08:51:46,563 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:51:46,567 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T08:51:46,568 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:51:46,568 INFO [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:51:46,568 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732438306532Running coprocessor pre-close hooks at 1732438306532Disabling compacts and flushes for region at 1732438306532Disabling writes for close at 1732438306532Obtaining lock to block concurrent updates at 1732438306532Preparing flush snapshotting stores in 1588230740 at 1732438306532Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732438306533 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732438306533Flushing 1588230740/ns: creating writer at 1732438306533Flushing 1588230740/ns: appending metadata at 1732438306547 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732438306547Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1386c1e2: reopening flushed file at 1732438306557 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1732438306563 (+6 ms)Writing region close event to WAL at 1732438306564 (+1 ms)Running coprocessor post-close hooks at 1732438306568 (+4 ms)Closed at 1732438306568 2024-11-24T08:51:46,568 DEBUG [RS_CLOSE_META-regionserver/469387a2cdb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:51:46,732 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(976): stopping server 469387a2cdb6,40605,1732438305559; all regions closed. 
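The flush sequence above writes the HFile under .tmp/ns and then "commits" it by moving it into the ns family directory. The sketch below shows that write-to-temp-then-rename pattern with the public Hadoop FileSystem API; the paths and the fs.defaultFS address are assumptions for illustration, and this is not the HRegionFileSystem implementation itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40803"); // assumed test NameNode address
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/data/hbase/meta/region/.tmp/ns/flushfile"); // hypothetical path
    Path dst = new Path("/data/hbase/meta/region/ns/flushfile");      // hypothetical path

    // Step 1: write the flushed data somewhere readers do not look.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here");
    }

    // Step 2: commit by renaming the temp file into the store directory,
    // mirroring the "Committing .tmp/ns/... as .../ns/..." line above.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit rename failed for " + tmp);
    }
  }
}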
2024-11-24T08:51:46,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,735 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,735 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,735 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:51:46,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:51:46,744 DEBUG [RS:0;469387a2cdb6:40605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/oldWALs 2024-11-24T08:51:46,744 INFO [RS:0;469387a2cdb6:40605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C40605%2C1732438305559.meta:.meta(num 1732438306357) 2024-11-24T08:51:46,744 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,744 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,744 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,744 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,744 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:51:46,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:51:46,748 DEBUG [RS:0;469387a2cdb6:40605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/oldWALs 2024-11-24T08:51:46,748 INFO [RS:0;469387a2cdb6:40605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 469387a2cdb6%2C40605%2C1732438305559:(num 1732438305956) 2024-11-24T08:51:46,748 DEBUG [RS:0;469387a2cdb6:40605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:51:46,748 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:51:46,748 INFO [RS:0;469387a2cdb6:40605 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:51:46,748 INFO [RS:0;469387a2cdb6:40605 {}] hbase.ChoreService(370): Chore service for: regionserver/469387a2cdb6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T08:51:46,748 INFO [RS:0;469387a2cdb6:40605 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:51:46,748 INFO [regionserver/469387a2cdb6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
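The lines above show a WAL being closed and "Moved 1 WAL file(s) to .../oldWALs". As a rough illustration (not the FSHLog implementation), rolling a write-ahead log amounts to closing the current segment, archiving it, and opening a fresh one; the directory and file names below are hypothetical and use local files rather than HDFS.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class WalRollSketch {
  private final Path walDir = Paths.get("wal");        // hypothetical directories
  private final Path oldWals = Paths.get("oldWALs");
  private Path current;
  private int segment;

  Path roll() throws IOException {
    Files.createDirectories(walDir);
    Files.createDirectories(oldWals);
    if (current != null) {
      // Archive the finished segment, like "Moved 1 WAL file(s) to .../oldWALs".
      Files.move(current, oldWals.resolve(current.getFileName()),
          StandardCopyOption.ATOMIC_MOVE);
    }
    // Open the next segment; the real FSHLog names segments by timestamp.
    current = walDir.resolve("wal." + (segment++));
    Files.createFile(current);
    return current;
  }

  public static void main(String[] args) throws IOException {
    WalRollSketch wal = new WalRollSketch();
    System.out.println("writing to " + wal.roll());
    System.out.println("rolled to  " + wal.roll()); // the previous segment is archived
  }
}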
2024-11-24T08:51:46,748 INFO [RS:0;469387a2cdb6:40605 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40605 2024-11-24T08:51:46,750 INFO [RS:0;469387a2cdb6:40605 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:51:46,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/469387a2cdb6,40605,1732438305559 2024-11-24T08:51:46,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:51:46,751 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [469387a2cdb6,40605,1732438305559] 2024-11-24T08:51:46,751 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/469387a2cdb6,40605,1732438305559 already deleted, retry=false 2024-11-24T08:51:46,751 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 469387a2cdb6,40605,1732438305559 expired; onlineServers=0 2024-11-24T08:51:46,751 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '469387a2cdb6,37723,1732438305491' ***** 2024-11-24T08:51:46,751 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:51:46,751 INFO [M:0;469387a2cdb6:37723 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:51:46,751 INFO [M:0;469387a2cdb6:37723 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:51:46,752 DEBUG [M:0;469387a2cdb6:37723 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:51:46,752 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
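The NodeDeleted event on /hbase/rs/... above is what lets the master notice the region server going away: the server's znode is ephemeral, so it disappears when the ZooKeeper session closes, and a watcher fires. A minimal sketch of that mechanism with the plain ZooKeeper client API follows; the quorum address and znode path are placeholders, and this is not the RegionServerTracker code itself.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsEphemeralWatchSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";            // assumed local ZooKeeper quorum
    String rsZnode = "/hbase/rs/example-server"; // hypothetical region server znode

    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && rsZnode.equals(event.getPath())) {
        // This is where expiration handling for the vanished server would start.
        System.out.println("ephemeral node deleted: " + event.getPath());
      }
    };

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
    // exists() both checks the node and arms a one-shot watch on it.
    zk.exists(rsZnode, watcher);
    Thread.sleep(60_000); // keep the process alive long enough to receive the event
    zk.close();
  }
}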
2024-11-24T08:51:46,752 DEBUG [M:0;469387a2cdb6:37723 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:51:46,752 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438305752 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.large.0-1732438305752,5,FailOnTimeoutGroup] 2024-11-24T08:51:46,752 DEBUG [master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438305752 {}] cleaner.HFileCleaner(306): Exit Thread[master/469387a2cdb6:0:becomeActiveMaster-HFileCleaner.small.0-1732438305752,5,FailOnTimeoutGroup] 2024-11-24T08:51:46,752 INFO [M:0;469387a2cdb6:37723 {}] hbase.ChoreService(370): Chore service for: master/469387a2cdb6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:51:46,752 INFO [M:0;469387a2cdb6:37723 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:51:46,752 DEBUG [M:0;469387a2cdb6:37723 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:51:46,752 INFO [M:0;469387a2cdb6:37723 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:51:46,752 INFO [M:0;469387a2cdb6:37723 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:51:46,752 INFO [M:0;469387a2cdb6:37723 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:51:46,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:51:46,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:51:46,752 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
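The ChoreService shutdown lines above list named periodic tasks ("ScheduledChore name=..., period=..., unit=MILLISECONDS") being cancelled. A rough JDK-only analogue of scheduling and then shutting down such periodic work is sketched below; it is not the HBase ChoreService, and the chore body and period are placeholders.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreServiceSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);

    // Stand-in for a chore such as FlushedSequenceIdFlusher: a periodic task
    // with a fixed period expressed in milliseconds.
    chores.scheduleAtFixedRate(
        () -> System.out.println("chore tick"), 0, 10_800_000, TimeUnit.MILLISECONDS);

    Thread.sleep(100);

    // "had [...] on shutdown": cancel pending runs and stop the pool.
    chores.shutdownNow();
    chores.awaitTermination(5, TimeUnit.SECONDS);
  }
}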
2024-11-24T08:51:46,753 DEBUG [M:0;469387a2cdb6:37723 {}] zookeeper.ZKUtil(347): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:51:46,753 WARN [M:0;469387a2cdb6:37723 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:51:46,753 INFO [M:0;469387a2cdb6:37723 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/.lastflushedseqids 2024-11-24T08:51:46,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741838_1014 (size=99) 2024-11-24T08:51:46,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741838_1014 (size=99) 2024-11-24T08:51:46,759 INFO [M:0;469387a2cdb6:37723 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:51:46,759 INFO [M:0;469387a2cdb6:37723 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:51:46,759 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:51:46,759 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:46,759 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:46,759 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:51:46,759 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:51:46,760 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T08:51:46,780 DEBUG [M:0;469387a2cdb6:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/65fd384408b2450fbf33f877ed9edd27 is 82, key is hbase:meta,,1/info:regioninfo/1732438306384/Put/seqid=0 2024-11-24T08:51:46,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741839_1015 (size=5672) 2024-11-24T08:51:46,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741839_1015 (size=5672) 2024-11-24T08:51:46,785 INFO [M:0;469387a2cdb6:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/65fd384408b2450fbf33f877ed9edd27 2024-11-24T08:51:46,804 DEBUG [M:0;469387a2cdb6:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5ff7fbf870fd4c4396c54b8607da3147 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732438306398/Put/seqid=0 2024-11-24T08:51:46,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741840_1016 (size=5275) 2024-11-24T08:51:46,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741840_1016 (size=5275) 2024-11-24T08:51:46,809 INFO [M:0;469387a2cdb6:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5ff7fbf870fd4c4396c54b8607da3147 2024-11-24T08:51:46,832 DEBUG [M:0;469387a2cdb6:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ddf80305b164616acad0c94781799ba is 69, key is 469387a2cdb6,40605,1732438305559/rs:state/1732438305804/Put/seqid=0 2024-11-24T08:51:46,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741841_1017 (size=5156) 2024-11-24T08:51:46,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741841_1017 (size=5156) 2024-11-24T08:51:46,837 INFO [M:0;469387a2cdb6:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ddf80305b164616acad0c94781799ba 2024-11-24T08:51:46,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:46,851 INFO [RS:0;469387a2cdb6:40605 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:51:46,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40605-0x10070ee072b0001, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:46,851 INFO [RS:0;469387a2cdb6:40605 {}] regionserver.HRegionServer(1031): Exiting; stopping=469387a2cdb6,40605,1732438305559; zookeeper connection closed. 2024-11-24T08:51:46,851 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6576bd65 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6576bd65 2024-11-24T08:51:46,851 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:51:46,856 DEBUG [M:0;469387a2cdb6:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d813c7139f394bc1b5a718dc25a634aa is 52, key is load_balancer_on/state:d/1732438306502/Put/seqid=0 2024-11-24T08:51:46,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741842_1018 (size=5056) 2024-11-24T08:51:46,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741842_1018 (size=5056) 2024-11-24T08:51:46,861 INFO [M:0;469387a2cdb6:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d813c7139f394bc1b5a718dc25a634aa 2024-11-24T08:51:46,866 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/65fd384408b2450fbf33f877ed9edd27 as hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/65fd384408b2450fbf33f877ed9edd27 2024-11-24T08:51:46,870 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/65fd384408b2450fbf33f877ed9edd27, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T08:51:46,871 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5ff7fbf870fd4c4396c54b8607da3147 as hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5ff7fbf870fd4c4396c54b8607da3147 2024-11-24T08:51:46,875 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5ff7fbf870fd4c4396c54b8607da3147, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T08:51:46,876 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ddf80305b164616acad0c94781799ba as hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ddf80305b164616acad0c94781799ba 2024-11-24T08:51:46,880 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ddf80305b164616acad0c94781799ba, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T08:51:46,881 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d813c7139f394bc1b5a718dc25a634aa as hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d813c7139f394bc1b5a718dc25a634aa 2024-11-24T08:51:46,885 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40803/user/jenkins/test-data/90489432-93cc-e475-231c-e237cb213b24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d813c7139f394bc1b5a718dc25a634aa, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T08:51:46,886 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=29, compaction requested=false 2024-11-24T08:51:46,888 INFO [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:51:46,888 DEBUG [M:0;469387a2cdb6:37723 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732438306759Disabling compacts and flushes for region at 1732438306759Disabling writes for close at 1732438306759Obtaining lock to block concurrent updates at 1732438306760 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732438306760Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732438306760Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732438306761 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732438306761Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732438306780 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732438306780Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732438306789 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732438306803 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732438306803Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732438306814 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732438306831 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732438306831Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732438306841 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732438306855 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732438306856 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@469da1c5: reopening flushed file at 1732438306865 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6491ba65: reopening flushed file at 1732438306870 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65b07db1: reopening flushed file at 1732438306875 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42ca2520: reopening flushed file at 1732438306880 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=29, compaction requested=false at 1732438306886 (+6 ms)Writing region close event to WAL at 1732438306888 (+2 ms)Closed at 1732438306888 2024-11-24T08:51:46,889 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,889 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,889 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,889 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,890 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:51:46,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36869 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:51:46,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:51:46,892 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:51:46,892 INFO [M:0;469387a2cdb6:37723 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T08:51:46,892 INFO [M:0;469387a2cdb6:37723 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37723 2024-11-24T08:51:46,892 INFO [M:0;469387a2cdb6:37723 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:51:46,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:46,994 INFO [M:0;469387a2cdb6:37723 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:51:46,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10070ee072b0000, quorum=127.0.0.1:60370, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:51:46,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ab6850e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:51:46,999 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ebfc7ad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:51:46,999 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:51:46,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fbc1bd8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:51:46,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ca0473d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.log.dir/,STOPPED} 2024-11-24T08:51:47,001 WARN [BP-1298695448-172.17.0.2-1732438304750 heartbeating to localhost/127.0.0.1:40803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:51:47,001 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:51:47,001 WARN [BP-1298695448-172.17.0.2-1732438304750 heartbeating to localhost/127.0.0.1:40803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1298695448-172.17.0.2-1732438304750 (Datanode Uuid b83f2aaf-7443-4456-9079-2b0616a6bf83) service to localhost/127.0.0.1:40803 2024-11-24T08:51:47,001 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:51:47,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data3/current/BP-1298695448-172.17.0.2-1732438304750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:47,003 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data4/current/BP-1298695448-172.17.0.2-1732438304750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:47,003 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:51:47,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b42ae65{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:51:47,005 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4dfe66f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:51:47,005 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:51:47,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cb23f43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:51:47,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3de3f342{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.log.dir/,STOPPED} 2024-11-24T08:51:47,007 WARN [BP-1298695448-172.17.0.2-1732438304750 heartbeating to localhost/127.0.0.1:40803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:51:47,007 WARN [BP-1298695448-172.17.0.2-1732438304750 heartbeating to localhost/127.0.0.1:40803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1298695448-172.17.0.2-1732438304750 (Datanode Uuid a23a109e-2c09-45a3-b680-0040e3685271) service to localhost/127.0.0.1:40803 2024-11-24T08:51:47,007 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:51:47,007 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:51:47,007 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data1/current/BP-1298695448-172.17.0.2-1732438304750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:47,007 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/cluster_d26d7200-8011-14dc-b29a-71add540439b/data/data2/current/BP-1298695448-172.17.0.2-1732438304750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:51:47,008 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:51:47,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@273b5f82{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:51:47,013 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2834d4ee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:51:47,013 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:51:47,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2474cb91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:51:47,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f31fc1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/612a6546-1d30-c839-09f7-cfe2cb6f0b16/hadoop.log.dir/,STOPPED} 2024-11-24T08:51:47,019 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:51:47,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:51:47,043 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 229) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40803 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40803 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:40803 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40803 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:40803 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40803 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:40803 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:40803 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=535 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=134 (was 134), ProcessCount=11 (was 11), AvailableMemoryMB=1327 (was 1363)
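The ResourceChecker summary above compares resource counts before and after the test (Thread=268 was 229, OpenFileDescriptor=535 was 512, and so on) and dumps the stacks of threads that appeared. The sketch below shows the basic before/after accounting for threads with the plain JDK; it is an illustration of the idea, not HBase's ResourceChecker, and the leaked thread it starts is synthetic.

import java.util.HashSet;
import java.util.Set;

public class ThreadLeakCheckSketch {
  static Set<String> liveThreadNames() {
    Set<String> names = new HashSet<>();
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      names.add(t.getName());
    }
    return names;
  }

  public static void main(String[] args) {
    Set<String> before = liveThreadNames();

    // Stand-in for the test body: start a thread that outlives the "test".
    Thread leaked = new Thread(() -> {
      try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
    }, "potentially-hanging-thread");
    leaked.setDaemon(true);
    leaked.start();

    Set<String> after = liveThreadNames();
    after.removeAll(before);
    // Report the count and the names of threads that were not present before.
    System.out.println("Thread=" + Thread.activeCount() + ", possibly leaked: " + after);
  }
}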